diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2110878d0..af7d8f7a8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,7 +38,7 @@ repos: - id: numpydoc-validation exclude: '^imap_processing/tests/|.*test.*' - repo: https://github.com/pre-commit/mirrors-mypy - rev: 'v1.10.0' + rev: 'v1.10.1' hooks: - id: mypy exclude: .*(tests|docs).* diff --git a/examples/Dockerfile.processing b/examples/Dockerfile.processing index 6c2ad65a0..103787b80 100644 --- a/examples/Dockerfile.processing +++ b/examples/Dockerfile.processing @@ -4,7 +4,8 @@ FROM public.ecr.aws/docker/library/python:3.10-slim # TODO: delete this section once imap_processing is released ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y git -RUN pip install git+https://github.com/IMAP-Science-Operations-Center/imap_processing.git@dev +#RUN pip install git+https://github.com/IMAP-Science-Operations-Center/imap_processing.git@dev +RUN pip install git+https://github.com/maxinelasp/imap_processing.git@mag_l1a_compression # Uncomment this once imap_processing is released # RUN pip install imap_processing diff --git a/imap_processing/mag/constants.py b/imap_processing/mag/constants.py index 34e00e68b..2c7d4311a 100644 --- a/imap_processing/mag/constants.py +++ b/imap_processing/mag/constants.py @@ -2,6 +2,8 @@ from enum import Enum +import numpy as np + class DataMode(Enum): """ @@ -55,3 +57,52 @@ class PrimarySensor(Enum): MAGO = 0 MAGI = 1 + + +FIBONACCI_SEQUENCE = [ + 1, + 2, + 3, + 5, + 8, + 13, + 21, + 34, + 55, + 89, + 144, + 233, + 377, + 610, + 987, + 1597, + 2584, + 4181, + 6765, + 10946, + 17711, + 28657, + 46368, + 75025, + 121393, + 196418, + 317811, + 514229, + 832040, + 1346269, + 2178309, + 3524578, + 5702887, + 9227465, + 14930352, + 24157817, + 39088169, + 63245986, + 102334155, + 165580141, +] + +MAX_FINE_TIME = np.iinfo(np.uint16).max # maximum 16 bit unsigned int +AXIS_COUNT = 3 +RANGE_BIT_WIDTH = 2 +MAX_COMPRESSED_VECTOR_BITS = 60 diff --git a/imap_processing/mag/l0/mag_l0_data.py b/imap_processing/mag/l0/mag_l0_data.py index c1569d132..8427a2a10 100644 --- a/imap_processing/mag/l0/mag_l0_data.py +++ b/imap_processing/mag/l0/mag_l0_data.py @@ -106,19 +106,13 @@ def __post_init__(self) -> None: Also convert encoded "VECSEC" (vectors per second) into proper vectors per second values. """ - # Convert string output from space_packet_parser to numpy array of - # big-endian bytes - self.VECTORS = np.frombuffer( - int(self.VECTORS, 2).to_bytes(len(self.VECTORS) // 8, "big"), # type: ignore[arg-type] - # TODO Check MYPY Error: Argument 1 to "int" has incompatible type - # "Union[ndarray[Any, Any], str]"; expected "Union[str, bytes, bytearray]" - dtype=np.dtype(">b"), - ) - - # Remove buffer from end of vectors. Vector data needs to be in 50 bit chunks, - # and may have an extra byte at the end from CCSDS padding. 
-        if len(self.VECTORS) % 2:
-            self.VECTORS = self.VECTORS[:-1]
+        if isinstance(self.VECTORS, str):
+            # Convert string output from space_packet_parser to numpy array of
+            # big-endian bytes
+            self.VECTORS = np.frombuffer(
+                int(self.VECTORS, 2).to_bytes(len(self.VECTORS) // 8, "big"),
+                dtype=np.dtype(">B"),
+            )
 
         self.PRI_VECSEC = 2**self.PRI_VECSEC
         self.SEC_VECSEC = 2**self.SEC_VECSEC
diff --git a/imap_processing/mag/l1a/mag_l1a.py b/imap_processing/mag/l1a/mag_l1a.py
index c6d31db1c..4dc3df2be 100644
--- a/imap_processing/mag/l1a/mag_l1a.py
+++ b/imap_processing/mag/l1a/mag_l1a.py
@@ -143,9 +143,6 @@ def process_packets(
     mago = {}
 
     for mag_l0 in mag_l0_list:
-        if mag_l0.COMPRESSION:
-            raise NotImplementedError("Unable to process compressed data")
-
         primary_start_time = TimeTuple(mag_l0.PRI_COARSETM, mag_l0.PRI_FNTM)
         secondary_start_time = TimeTuple(mag_l0.SEC_COARSETM, mag_l0.SEC_FNTM)
 
@@ -181,14 +178,12 @@
         # now we know the number of secs of data in the packet, and the data rates of
         # each sensor, we can calculate how much data is in this packet and where the
         # byte boundaries are.
-        primary_vectors, secondary_vectors = MagL1a.process_vector_data(
-            mag_l0.VECTORS.astype(dtype=np.int32),  # type: ignore[union-attr]
-            # TODO Maybe Change, Item "str" of "Union[Any, str]"
-            # has no attribute "astype"
-            # this is because mypy expects both to have the attributes
+        primary_vectors, secondary_vectors = MagL1a.process_vector_data(
+            mag_l0.VECTORS,  # type: ignore
             primary_packet_data.total_vectors,
             secondary_packet_data.total_vectors,
+            mag_l0.COMPRESSION,
         )
 
         primary_timestamped_vectors = MagL1a.calculate_vector_time(
diff --git a/imap_processing/mag/l1a/mag_l1a_data.py b/imap_processing/mag/l1a/mag_l1a_data.py
index d50f6018a..48a67f8b2 100644
--- a/imap_processing/mag/l1a/mag_l1a_data.py
+++ b/imap_processing/mag/l1a/mag_l1a_data.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import math
 from dataclasses import InitVar, dataclass, field
 from math import floor
 
@@ -9,10 +10,15 @@
 import numpy.typing as npt
 
 from imap_processing.cdf.utils import J2000_EPOCH
+from imap_processing.mag.constants import (
+    AXIS_COUNT,
+    FIBONACCI_SEQUENCE,
+    MAX_COMPRESSED_VECTOR_BITS,
+    MAX_FINE_TIME,
+    RANGE_BIT_WIDTH,
+)
 from imap_processing.spice.time import met_to_j2000ns
 
-MAX_FINE_TIME = 65535  # maximum 16 bit unsigned int
-
 
 @dataclass
 class TimeTuple:
@@ -72,7 +78,7 @@ def to_seconds(self) -> float:
         seconds : float
             Time in seconds.
         """
-        return self.coarse_time + self.fine_time / MAX_FINE_TIME
+        return float(self.coarse_time + self.fine_time / MAX_FINE_TIME)
 
 
 @dataclass
@@ -187,6 +193,13 @@ class MagL1a:
     append_vectors()
     calculate_vector_time()
     process_vector_data()
+    process_uncompressed_vectors()
+    process_compressed_vectors()
+    process_range_data_section()
+    convert_diffs_to_vectors()
+    unpack_one_vector()
+    decode_fib_zig_zag()
+    twos_complement()
     """
 
     is_mago: bool
@@ -288,10 +301,54 @@ def calculate_vector_time(
 
     @staticmethod
     def process_vector_data(
+        vector_data: np.ndarray,
+        primary_count: int,
+        secondary_count: int,
+        compression: int,
+    ) -> tuple[np.ndarray, np.ndarray]:
+        """
+        Transform raw vector data into Vectors.
+
+        Vectors are grouped into primary sensor and secondary sensor, and returned as a
+        tuple (primary sensor vectors, secondary sensor vectors).
+
+        Parameters
+        ----------
+        vector_data : numpy.ndarray
+            Raw vector data, in bytes. Contains both primary and secondary vector data.
+            Can be either compressed or uncompressed.
+        primary_count : int
+            Count of the number of primary vectors.
+ secondary_count : int + Count of the number of secondary vectors. + compression : int + Flag indicating if the data is compressed (1) or uncompressed (0). + + Returns + ------- + (primary, secondary): (numpy.ndarray, numpy.ndarray) + Two arrays, each containing tuples of (x, y, z, sample_range) for each + vector sample. + """ + if compression: + # If the vectors are compressed, we need them to be uint8 to convert to + # bits. + return MagL1a.process_compressed_vectors( + vector_data.astype(np.uint8), primary_count, secondary_count + ) + + # If the vectors are uncompressed, we need them to be int32, as there are + # bitshifting operations. Either way, the return type should be int32. + return MagL1a.process_uncompressed_vectors( + vector_data.astype(np.int32), primary_count, secondary_count + ) + + @staticmethod + def process_uncompressed_vectors( vector_data: np.ndarray, primary_count: int, secondary_count: int ) -> tuple[np.ndarray, np.ndarray]: """ - Given raw packet data, process into Vectors. + Given raw uncompressed packet data, process into Vectors. Vectors are grouped into primary sensor and secondary sensor, and returned as a tuple (primary sensor vectors, secondary sensor vectors). @@ -315,7 +372,6 @@ def process_vector_data( vector sample. """ - # TODO: error handling def to_signed16(n: int) -> int: """ Convert an integer to a signed 16-bit integer. @@ -337,10 +393,6 @@ def to_signed16(n: int) -> int: primary_vectors = [] secondary_vectors = [] - # To avoid overflows, we need to cast the potentially 8 bit signed integers to - # int32 before the bitshifting operations below. - vector_data = vector_data.astype(np.int32) - # Since the vectors are stored as 50 bit chunks but accessed via hex (4 bit # chunks) there is some shifting required for processing the bytes. # However, from a bit processing perspective, the first 48 bits of each 50 bit @@ -430,6 +482,525 @@ def to_signed16(n: int) -> int: secondary_vectors.append(vector) return ( - np.array(primary_vectors, dtype=np.int64), - np.array(secondary_vectors, dtype=np.int64), + np.array(primary_vectors, dtype=np.int32), + np.array(secondary_vectors, dtype=np.int32), + ) + + @staticmethod + def process_compressed_vectors( + vector_data: np.ndarray, primary_count: int, secondary_count: int + ) -> tuple[np.ndarray, np.ndarray]: + """ + Given raw compressed packet data, process into Vectors. + + To do this, we need to decode the compressed data. The compressed data starts + with an 8 bit header that defines the width of the uncompressed vectors and + if there is a range data section. Then, the vector data follows, then the range + data section if it exists. + + To decode, we start by decoding the first compression_width bits. This is an + uncompressed primary vector with range. Then, we proceed through the compressed + data, where each value is fibonacci and zig-zag encoded. This means each value + ends in 2 sequential ones (11). We split the data along these numbers until + we reach primary_count vectors. + + The secondary vectors are decoded the same way, starting directly after the last + primary vector with an uncompressed secondary starting vector and then + secondary_count compressed vectors. + + The compressed values are differences from the previous vector, so after + decoding we accumulate the values starting from the first known vector. The + range data is copied from the starting vector if range_data_section is not + included. + + Then, if a range data section is included, we decode it and assign it to each + vector. 
There are 2 * (primary_count + secondary_count - 2) bits assigned for the
+        range data section, since the two uncompressed starting vectors carry their
+        range bits inline.
+
+        If any compressed vectors are > 60 bits long (MAX_COMPRESSED_VECTOR_BITS), then
+        we switch to uncompressed vectors for the rest of the processing.
+
+        Parameters
+        ----------
+        vector_data : numpy.ndarray
+            Raw vector data, in bytes. Contains both primary and secondary vector data.
+        primary_count : int
+            Count of the number of primary vectors.
+        secondary_count : int
+            Count of the number of secondary vectors.
+
+        Returns
+        -------
+        (primary, secondary): (numpy.ndarray, numpy.ndarray)
+            Two arrays, each containing tuples of (x, y, z, sample_range) for each
+            vector sample.
+        """
+        bit_array = np.unpackbits(vector_data)
+        # The first 8 bits are a header - 6 bits to indicate the compression width,
+        # 1 bit to indicate if there is a range data section, and 1 bit spare.
+        compression_width = int("".join([str(i) for i in bit_array[:6]]), 2)
+        has_range_data_section = bit_array[6] == 1
+
+        # The full vector includes 3 values of compression_width bits, and excludes
+        # range.
+        uncompressed_vector_size = compression_width * AXIS_COUNT
+        # Plus 8 bits for the header, plus RANGE_BIT_WIDTH for the first vector's range
+        first_vector_width = uncompressed_vector_size + 8 + RANGE_BIT_WIDTH
+        first_vector = MagL1a.unpack_one_vector(
+            bit_array[8:first_vector_width], compression_width, True
+        )
+
+        # The range data length has 2 bits per vector, minus 2 for the uncompressed
+        # first vectors in the primary and secondary sensors.
+        # Then, the range data length is padded to the nearest 8 bits.
+        expected_range_data_length = (primary_count + secondary_count - 2) * 2
+        end_padding = expected_range_data_length // 8 * 8 - expected_range_data_length
+
+        end_vector = len(bit_array) - (
+            (expected_range_data_length - end_padding) * has_range_data_section
+        )
+
+        # Cut off the first vector width and the end range data section if it exists.
+        vector_bits = bit_array[first_vector_width - 1 : end_vector]
+
+        # Shift the bit array over one to the left, then sum them up. This is used to
+        # find all the places where two 1s occur next to each other, because the sum
+        # will be 2 for those indices.
+        # For example: [0 0 1 0 1 1] + [1 0 0 1 0 1] = [1 0 1 1 1 2], so the last index
+        # has 2 ones in a row.
+        # The first bit is invalid, so we remove it at the end.
+        sequential_ones = np.where(
+            np.add(
+                vector_bits,
+                np.roll(vector_bits, 1),
+            )[1:]
+            == 2
+        )[0]
+        # The first bit is only needed for the np.roll step, so now we remove it.
+        # We are left with compressed primary vectors, and all the secondary vectors.
+        vector_bits = vector_bits[1:]
+
+        # That unneeded first bit might give us a false first sequential-ones value,
+        # so if the first index has 2 ones, skip it.
+        primary_boundaries = [
+            (
+                sequential_ones[0] + 1
+                if sequential_ones[0] != 0
+                else sequential_ones[1] + 1
+            )
+        ]
+        secondary_boundaries = []
+        vector_count = 1
+        end_primary_vector = 0
+        for seq_val in sequential_ones:
+            if vector_count > primary_count + secondary_count:
+                break
+            # Add the end indices of each primary vector to primary_boundaries.
+            # If we have 3 ones in a row, we should skip that index.
+
+            if vector_count < primary_count and (seq_val - primary_boundaries[-1] > 0):
+                primary_boundaries.append(seq_val + 1)
+
+                # 3 boundaries equal one vector
+                if len(primary_boundaries) % AXIS_COUNT == 0:
+                    vector_count += 1
+                    # If the vector length is >60 bits, we switch to uncompressed.
+                    # So we skip past all the remaining seq_ones.
+ if ( + (len(primary_boundaries) > 4) + and ( + primary_boundaries[-1] - primary_boundaries[-4] + > MAX_COMPRESSED_VECTOR_BITS + ) + or ( + vector_count == 2 + and primary_boundaries[-1] > MAX_COMPRESSED_VECTOR_BITS + ) + ): + # Since we know how long each uncompressed vector is, + # we can determine the end of the primary vectors. + end_primary_vector = ( + primary_boundaries[-1] + + (primary_count - vector_count) * uncompressed_vector_size + ) + vector_count = primary_count + + # If the vector count is equal to the primary count, we are in the first + # uncompressed secondary vector. + if vector_count == primary_count: + # We won't have assigned end_primary_vector unless we hit uncompressed + # vectors in the primary path. If there are no uncompressed values, + # we can use the end of primary_boundaries. + end_primary_vector = ( + primary_boundaries[-1] + if end_primary_vector == 0 + else end_primary_vector + ) + if seq_val > end_primary_vector + uncompressed_vector_size + 2: + # Split just after the uncompressed secondary vector + secondary_boundaries = [ + end_primary_vector + uncompressed_vector_size + 2 + ] + # We have found the first secondary vector + secondary_boundaries += [seq_val + 1] + vector_count += 1 + + # If we're greater than primary_count, we are in the secondary vectors. + # Like before, we skip indices with 3 ones. + if vector_count > primary_count and seq_val - secondary_boundaries[-1] > 0: + secondary_boundaries.append(seq_val + 1) + # We have the start of the secondary vectors in + # secondary_boundaries, so we need to subtract one to determine + # the vector count. (in primary_boundaries we know we start at 0.) + if (len(secondary_boundaries) - 1) % AXIS_COUNT == 0: + vector_count += 1 + if ( + secondary_boundaries[-1] - secondary_boundaries[-4] + > MAX_COMPRESSED_VECTOR_BITS + ): + # The rest of the secondary values are uncompressed. + vector_count = primary_count + secondary_count + 1 + + # Split along the boundaries of the primary vectors. This gives us a list of + # bit arrays, each corresponding to a primary value (1/3 of a vector). + primary_split_bits = np.split( + vector_bits, + primary_boundaries, + )[:-1] + primary_vectors = MagL1a._process_vector_section( + vector_bits, + primary_split_bits, + primary_boundaries[-1], + first_vector, + primary_count, + uncompressed_vector_size, + compression_width, + ) + + # Secondary vector processing + first_secondary_vector = MagL1a.unpack_one_vector( + vector_bits[ + end_primary_vector : end_primary_vector + uncompressed_vector_size + 2 + ], + compression_width, + True, + ) + + # Split up the bit array, skipping past the primary vector and uncompressed + # starting vector + secondary_split_bits = np.split( + vector_bits[: secondary_boundaries[-1]], secondary_boundaries[:-1] + )[1:] + + secondary_vectors = MagL1a._process_vector_section( + vector_bits, + secondary_split_bits, + secondary_boundaries[-1], + first_secondary_vector, + secondary_count, + uncompressed_vector_size, + compression_width, + ) + + # If there is a range data section, it describes all the data, compressed or + # uncompressed. 
+        if has_range_data_section:
+            primary_vectors = MagL1a.process_range_data_section(
+                bit_array[end_vector : end_vector + (primary_count - 1) * 2],
+                primary_vectors,
+            )
+            secondary_vectors = MagL1a.process_range_data_section(
+                bit_array[
+                    end_vector + (primary_count - 1) * 2 : end_vector
+                    + (primary_count + secondary_count - 2) * 2
+                ],
+                secondary_vectors,
+            )
+        return primary_vectors, secondary_vectors
+
+    @staticmethod
+    def _process_vector_section(  # noqa: PLR0913
+        vector_bits: np.ndarray,
+        split_bits: list,
+        last_index: int,
+        first_vector: np.ndarray,
+        vector_count: int,
+        uncompressed_vector_size: int,
+        compression_width: int,
+    ) -> np.ndarray:
+        """
+        Generate a section of vector data, primary or secondary.
+
+        Should only be used by process_compressed_vectors.
+
+        Parameters
+        ----------
+        vector_bits : numpy.ndarray
+            Numpy array of bits, representing the vector data. Does not include the
+            first primary vector.
+        split_bits : list
+            A list of bit arrays, one per compressed value, produced by splitting the
+            vector bits at the value boundaries.
+        last_index : int
+            The index of the last vector in the section (primary or secondary).
+        first_vector : numpy.ndarray
+            The first vector in the section, (x, y, z, range).
+        vector_count : int
+            The number of vectors in the section (primary or secondary).
+        uncompressed_vector_size : int
+            The size of an uncompressed vector in bits.
+        compression_width : int
+            The width of the uncompressed values - uncompressed_vector_size/3.
+
+        Returns
+        -------
+        numpy.ndarray
+            An array of processed vectors.
+        """
+        vector_diffs = list(map(MagL1a.decode_fib_zig_zag, split_bits))
+        vectors = MagL1a.convert_diffs_to_vectors(
+            first_vector, vector_diffs, vector_count
+        )
+        # If we are missing any vectors from primary_split_bits, we know we have
+        # uncompressed vectors to process.
+        compressed_count = math.ceil(len(split_bits) / AXIS_COUNT) + 1
+        uncompressed_count = vector_count - compressed_count
+
+        if uncompressed_count:
+            end = last_index + uncompressed_vector_size * uncompressed_count
+            uncompressed_vectors = vector_bits[last_index : end + 1]
+
+            for i in range(uncompressed_count):
+                decoded_vector = MagL1a.unpack_one_vector(
+                    uncompressed_vectors[
+                        i * uncompressed_vector_size : (i + 1)
+                        * uncompressed_vector_size
+                    ],
+                    compression_width,
+                    False,
+                )
+                vectors[i + compressed_count] = decoded_vector
+                vectors[i + compressed_count][3] = vectors[0][3]
+
+        return vectors
+
+    @staticmethod
+    def process_range_data_section(
+        range_data: np.ndarray, vectors: np.ndarray
+    ) -> np.ndarray:
+        """
+        Given a range data section and vectors, return an updated vector array.
+
+        Each range value has 2 bits. Since the first vector's range arrives with its
+        uncompressed encoding, range_data has a length of (n - 1) * 2, where n is the
+        number of vectors in vectors.
+
+        Parameters
+        ----------
+        range_data : numpy.ndarray
+            Array of range values, where each value is one bit. The range values have
+            2 bits per vector, so range_data should be 2 * (len(vectors) - 1) in
+            length.
+        vectors : numpy.ndarray
+            Array of vectors, where each vector is a tuple of (x, y, z, range).
+            The range value will be overwritten by range_data, and x, y, z will remain
+            the same.
+
+        Returns
+        -------
+        numpy.ndarray
+            Updated array of vectors, identical to vectors with the range values
+            updated from range_data.
+        """
+        if len(range_data) != (len(vectors) - 1) * 2:
+            raise ValueError(
+                "Incorrect length for range_data; there should be two bits per "
+                "vector, excluding the first."
+            )
+
+        updated_vectors: np.ndarray = np.copy(vectors)
+        range_str = "".join([str(i) for i in range_data])
+        for i in range(len(vectors) - 1):
+            range_int = int(range_str[i * 2 : i * 2 + 2], 2)
+            updated_vectors[i + 1][3] = range_int
+        return updated_vectors
+
+    @staticmethod
+    def convert_diffs_to_vectors(
+        first_vector: np.ndarray,
+        vector_differences: list[int],
+        vector_count: int,
+    ) -> np.ndarray:
+        """
+        Given a list of differences and the first vector, return calculated vectors.
+
+        The differences are a flat list with three values (x, y, z) per vector, so
+        vector i is calculated as follows:
+        vector[i][0] = vector[i-1][0] + vector_differences[3*(i-1)]
+        vector[i][1] = vector[i-1][1] + vector_differences[3*(i-1) + 1]
+        vector[i][2] = vector[i-1][2] + vector_differences[3*(i-1) + 2]
+        vector[i][3] = first_vector[3]
+
+        The fourth element of each vector is the range value, which we assume is the
+        same as the first vector.
+
+        Parameters
+        ----------
+        first_vector : numpy.ndarray
+            A numpy array of 3 signed integers and a range value, representing the
+            start vector.
+        vector_differences : list[int]
+            A flat list of signed integers, three (x, y, z) per vector, representing
+            the differences between vectors.
+        vector_count : int
+            The expected number of vectors in the output.
+
+        Returns
+        -------
+        numpy.ndarray
+            A numpy array of shape (expected_vector_count, 4) of signed integers,
+            representing the calculated vectors.
+        """
+        vectors: np.ndarray = np.empty((vector_count, 4), dtype=np.int32)
+        vectors[0] = first_vector
+        if len(vector_differences) % AXIS_COUNT != 0:
+            raise ValueError(
+                "Error! Computed compressed vector differences are not "
+                "divisible by 3 - meaning some data is missing. "
+                f"Expected length: {vector_count * AXIS_COUNT}, actual length: "
+                f"{len(vector_differences)}"
+            )
+        index = 0
+        vector_index = 1
+        for diff in vector_differences:
+            vectors[vector_index][index] = vectors[vector_index - 1][index] + diff
+            index += 1
+            if index == 3:
+                # Update range section to match that of the first vector
+                vectors[vector_index][3] = vectors[0][3]
+                index = 0
+                vector_index += 1
+        return vectors
+
+    @staticmethod
+    def unpack_one_vector(
+        vector_data: np.ndarray, width: int, has_range: int
+    ) -> np.ndarray:
+        """
+        Unpack a single vector from the vector data.
+
+        Input should be a numpy array of bits, e.g. [0, 0, 0, 1], of the length
+        width*3, or width*3 + 2 if has_range is True.
+
+        Parameters
+        ----------
+        vector_data : numpy.ndarray
+            Vector data for the vector to unpack. This is uncompressed data as a numpy
+            array of bits (the output of np.unpackbits).
+        width : int
+            The width of each vector component in bits. This does not need to be a
+            multiple of 8; each component is padded out to whole bytes before
+            unpacking.
+        has_range : int
+            1 if the vector data includes range data, 0 if not. The first vector always
+            has range data.
+
+        Returns
+        -------
+        numpy.ndarray
+            Unpacked vector data as a numpy array of 3 signed ints plus a range (0 if
+            has_range is False).
+        """
+        if np.any(vector_data > 1):
+            raise ValueError(
+                "unpack_one_vector method is expecting an array of bits as input."
+            )
+
+        if len(vector_data) != width * AXIS_COUNT + RANGE_BIT_WIDTH * has_range:
+            raise ValueError(
+                f"Invalid length {len(vector_data)} for vector data. Expected "
+                f"{width * AXIS_COUNT} or {width * AXIS_COUNT + RANGE_BIT_WIDTH} if "
+                f"has_range."
+ ) + padding = np.zeros(8 - (width % 8), dtype=np.uint8) + + # take slices of the input data and pack from an array of bits to an array of + # uint8 bytes + x = np.packbits(np.concatenate((padding, vector_data[:width]))) + y = np.packbits(np.concatenate((padding, vector_data[width : 2 * width]))) + z = np.packbits(np.concatenate((padding, vector_data[2 * width : 3 * width]))) + + range_string = "".join([str(i) for i in vector_data[-2:]]) + + rng = int(range_string, 2) if has_range else 0 + + # Convert to signed integers using twos complement + signed_vals: np.ndarray = np.array( + [ + MagL1a.twos_complement(x, width), + MagL1a.twos_complement(y, width), + MagL1a.twos_complement(z, width), + rng, + ], + dtype=np.int32, ) + return signed_vals + + @staticmethod + def twos_complement(value: np.ndarray, bits: int) -> np.int32: + """ + Compute the two's complement of an integer. + + This function will return the two's complement of a given bytearray value. + The input value should be a bytearray or a numpy array of uint8 values. + + If the integer with respect to the number of bits does not have a sign bit + set (first bit is 0), then the input value is returned without modification. + + Parameters + ---------- + value : numpy.ndarray + An array of bytes representing an integer. In numpy, this should be an + array of uint8 values. + bits : int + Number of bits to use for the 2's complement. + + Returns + ------- + numpy.int32 + Two's complement of the input value, as a signed int. + """ + integer_value = int.from_bytes(value, "big") + if (integer_value & (1 << (bits - 1))) != 0: + output_value = integer_value - (1 << bits) + else: + output_value = integer_value + return np.int32(output_value) + + @staticmethod + def decode_fib_zig_zag(code: np.ndarray) -> int: + """ + Decode a fibonacci and zig-zag encoded value. + + Parameters + ---------- + code : numpy.ndarray + The code to decode, in the form of an array of bits (eg [0, 1, 0, 1, 1]). + This should always end in 2 ones (which indicates the end of a fibonacci + encoding). + + Returns + ------- + value: int + Signed integer value, with fibonacci and zig-zag encoding removed. + """ + if len(code) < 2 or code[-2] != 1 or code[-1] != 1: + raise ValueError( + f"Error when decoding {code} - fibonacci encoded values " + f"should end in 2 sequential ones." + ) + + # Fibonacci decoding + code = code[:-1] + value: int = sum(FIBONACCI_SEQUENCE[: len(code)] * code) - 1 + # Zig-zag decode (to go from uint to signed int) + value = int((value >> 1) ^ (-(value & 1))) + + return value diff --git a/imap_processing/tests/mag/test_mag_decom.py b/imap_processing/tests/mag/test_mag_decom.py index 0e790b528..968e28336 100644 --- a/imap_processing/tests/mag/test_mag_decom.py +++ b/imap_processing/tests/mag/test_mag_decom.py @@ -47,8 +47,10 @@ def test_mag_decom(): # Remove bytes for header and previous attributes from CCSDS_HEX, # remaining bytes are vectors + # This also removes the buffer from the end of the vectors. The buffer is + # not part of the validation data, but does not affect processing. 
assert ( - test.VECTORS.tobytes().hex() + test.VECTORS.tobytes().hex()[:-2] == expected_output["CCSDS_HEX"][index][54:].lower() ) diff --git a/imap_processing/tests/mag/test_mag_l1a.py b/imap_processing/tests/mag/test_mag_l1a.py index c15fca7b5..b01988e1d 100644 --- a/imap_processing/tests/mag/test_mag_l1a.py +++ b/imap_processing/tests/mag/test_mag_l1a.py @@ -2,6 +2,7 @@ import numpy as np import pandas as pd +import pytest from imap_processing.mag.l0.decom_mag import decom_packets from imap_processing.mag.l1a.mag_l1a import mag_l1a, process_packets @@ -14,12 +15,396 @@ from imap_processing.spice.time import met_to_j2000ns +@pytest.fixture() +def uncompressed_vector_bytearray(): + input_data = np.array( + [ + 2, + 4, + 8, + 16, + 16, + 32, + 192, + 129, + 194, + 7, + 68, + 14, + 176, + 32, + 160, + 130, + 161, + 5, + 76, + 8, + 52, + 32, + 220, + 65, + 191, + 2, + 17, + 8, + 68, + 16, + 137, + 192, + 133, + 2, + 20, + 132, + 41, + 48, + 33, + 112, + 133, + 241, + 11, + 236, + 8, + 108, + 33, + 176, + 67, + 99, + 2, + 30, + 8, + 121, + 16, + 243, + 192, + 136, + 66, + 33, + 132, + 67, + 112, + 34, + 64, + 137, + 49, + 18, + 124, + 8, + 160, + 34, + 132, + 69, + 11, + 2, + 43, + 8, + 174, + 17, + 92, + 192, + 139, + 130, + 46, + 196, + 93, + 176, + 35, + 32, + 140, + 129, + 25, + 28, + 8, + 212, + 35, + 84, + 70, + 175, + 2, + 3, + 8, + 15, + 16, + 31, + 192, + 129, + 194, + 7, + 4, + 14, + 112, + 32, + 160, + 130, + 161, + 5, + 76, + 8, + 52, + 32, + 220, + 65, + 187, + 2, + 17, + 8, + 68, + 16, + 136, + 192, + 133, + 2, + 20, + 68, + 40, + 240, + 33, + 112, + 133, + 225, + 11, + 220, + 8, + 104, + 33, + 172, + 67, + 95, + 2, + 30, + 8, + 121, + 16, + 242, + 192, + 136, + 66, + 33, + 132, + 67, + 48, + 34, + 64, + 137, + 49, + 18, + 124, + 8, + 160, + 34, + 128, + 69, + 7, + 2, + 43, + 8, + 173, + 17, + 91, + 192, + 139, + 130, + 46, + 196, + 93, + 176, + 35, + 32, + 140, + 129, + 25, + 12, + 8, + 212, + 35, + 84, + 70, + 171, + ], + dtype=np.uint8, + ) + return input_data + + +@pytest.fixture() +def expected_vectors(): + primary_expected = np.array( + [ + [516, 2064, 4128, 3], + [519, 2077, 4154, 3], + [522, 2090, 4180, 3], + [525, 2103, 4207, 3], + [529, 2116, 4233, 3], + [532, 2130, 4260, 3], + [535, 2143, 4286, 3], + [539, 2156, 4312, 3], + [542, 2169, 4339, 3], + [545, 2182, 4365, 3], + [548, 2195, 4391, 3], + [552, 2209, 4418, 3], + [555, 2222, 4444, 3], + [558, 2235, 4470, 3], + [562, 2248, 4497, 3], + [565, 2261, 4523, 3], + ] + ) + + secondary_expected = np.array( + [ + [515, 2063, 4127, 3], + [519, 2076, 4153, 3], + [522, 2090, 4180, 3], + [525, 2103, 4206, 3], + [529, 2116, 4232, 3], + [532, 2129, 4259, 3], + [535, 2142, 4285, 3], + [538, 2155, 4311, 3], + [542, 2169, 4338, 3], + [545, 2182, 4364, 3], + [548, 2195, 4391, 3], + [552, 2208, 4417, 3], + [555, 2221, 4443, 3], + [558, 2235, 4470, 3], + [562, 2248, 4496, 3], + [565, 2261, 4522, 3], + ] + ) + + return (primary_expected, secondary_expected) + + +@pytest.fixture() +def raw_compressed_vectors(): + # compressed vectors, without the first starting uncompressed vector. + # 15 primary vectors and 15 secondary vectors, corresponding to most of + # the vectors in expected_vectors. 
+ primary_compressed = ( + "0101110010" + "011100101011010111001001110010101101011100100110000000011100" + "011100100111001010110101100001011000000001101011100100111001" + "010111000111001001110010101101011100100110000000011010111001" + "001110010101101011100100111001010111000110000101100000000110" + "101110010011100101011010111001001110010101110001110010011000" + "00000110101110010011100101011" + ) + + secondary_compressed = ( + "10001110" + "0100111001010110101100001011000000001101011100100111001010" + "1110001110010011100101011010111001001100000000110101110010" + "0111001010110101110010011100101011100011000010110000000011" + "0101110010011100101011010111001001100000000111000111001001" + "1100101011010111001001110010101101011000010110000000011100" + "011100100111001010110101110010011100101011" + ) + return primary_compressed, secondary_compressed + + +def test_different_vector_rates( + uncompressed_vector_bytearray, expected_vectors, raw_compressed_vectors +): + current_directory = Path(__file__).parent + test_file = current_directory / "mag_l1_test_data.pkts" + # Test file contains only normal packets + l0 = decom_packets(test_file)["norm"][0] + + # overwrite vectors and different vector rates + + l0.PRI_VECSEC = 4 # twice as many primary vectors as secondary vectors - 32 vectors + l0.VECTORS = np.concatenate( + ( + uncompressed_vector_bytearray[:100], + uncompressed_vector_bytearray[:100], + uncompressed_vector_bytearray[100:], + ) + ) + l1 = process_packets([l0]) + expected_day = np.datetime64("2023-11-30") + + assert len(l1["magi"][expected_day].vectors) == 16 + assert len(l1["mago"][expected_day].vectors) == 32 + + assert np.array_equal( + l1["mago"][expected_day].vectors[:, :4], + np.concatenate((expected_vectors[0], expected_vectors[0])), + ) + assert np.array_equal(l1["magi"][expected_day].vectors[:, :4], expected_vectors[1]) + + # compressed data + # Compression headers - indicating a 16 bit width and no range section + headers = "01000000" + + # 50 bits each - 16 bits per vector value, 2 bits for range. + first_primary_vector = "00000010000001000000100000010000000100000010000011" + first_secondary_vector = "00000010000000110000100000001111000100000001111111" + + primary_compressed = ( + first_primary_vector + raw_compressed_vectors[0] + raw_compressed_vectors[0] + ) + secondary_compressed = first_secondary_vector + raw_compressed_vectors[1] + + input_data = np.array( + [int(i) for i in headers + primary_compressed + secondary_compressed], + dtype=np.uint8, + ) + + # Will be the input data format + input_data = np.packbits(input_data) + + (primary, secondary) = MagL1a.process_compressed_vectors(input_data, 31, 16) + + assert (primary > 0).all() + assert np.array_equal(primary[:16], expected_vectors[0]) + assert np.array_equal(secondary, expected_vectors[1]) + + +def test_padding_uncompressed(expected_vectors): + # Test if the padding falls directly on a byte boundary + headers = "01000000" + + # 50 bits each - 16 bits per vector value, 2 bits for range. 
+ first_primary_vector = "00000010000001000000100000010000000100000010000011" + first_secondary_vector = "00000010000000110000100000001111000100000001111111" + + primary_vectors = "11111111111011" + secondary_vectors = "11111111111011" + + last_vector = expected_vectors[0][0].copy() + last_vector[2] = last_vector[2] - 2 + + input_data = np.array( + [ + int(i) + for i in headers + + first_primary_vector + + primary_vectors + + first_secondary_vector + + secondary_vectors + ], + dtype=np.uint8, + ) + + # Will be the input data format + input_data = np.packbits(input_data) + + (primary, secondary) = MagL1a.process_compressed_vectors(input_data, 3, 3) + + assert np.array_equal(primary[0], expected_vectors[0][0]) + assert np.array_equal(primary[1], expected_vectors[0][0]) + assert np.array_equal(primary[2], last_vector) + + last_vector = expected_vectors[1][0].copy() + last_vector[2] = last_vector[2] - 2 + + assert np.array_equal(secondary[0], expected_vectors[1][0]) + assert np.array_equal(secondary[1], expected_vectors[1][0]) + assert np.array_equal(secondary[2], last_vector) + + def test_compare_validation_data(): current_directory = Path(__file__).parent test_file = current_directory / "mag_l1_test_data.pkts" # Test file contains only normal packets l0 = decom_packets(test_file) - l1 = process_packets(l0["norm"]) # Should have one day of data expected_day = np.datetime64("2023-11-30") @@ -47,7 +432,297 @@ def test_compare_validation_data(): assert l1_magi.vectors[index][3] == validation_data["rng_sec"][index] -def test_process_vector_data(): +def test_compressed_vector_data(expected_vectors, raw_compressed_vectors): + # Values from test packet + primary_expected = expected_vectors[0] + secondary_expected = expected_vectors[1] + + # Compression headers - indicating a 16 bit width and no range section + headers = "01000000" + + # 50 bits each - 16 bits per vector value, 2 bits for range. + first_primary_vector = "00000010000001000000100000010000000100000010000011" + first_secondary_vector = "00000010000000110000100000001111000100000001111111" + + primary_compressed = first_primary_vector + raw_compressed_vectors[0] + secondary_compressed = first_secondary_vector + raw_compressed_vectors[1] + + padding = "00000" # Pad to byte boundary + input_data = np.array( + [int(i) for i in headers + primary_compressed + secondary_compressed + padding], + dtype=np.uint8, + ) + + # Will be the input data format + input_data = np.packbits(input_data) + + (primary, secondary) = MagL1a.process_compressed_vectors(input_data, 16, 16) + + assert np.array_equal(primary[0], primary_expected[0]) + assert np.array_equal(secondary[0], secondary_expected[0]) + + # There should be 16 vectors for both primary and secondary, with 4 values per + # vector. + assert primary.shape[0] == 16 + assert secondary.shape[0] == 16 + + assert primary.shape[1] == 4 + assert secondary.shape[1] == 4 + + assert np.array_equal(primary, primary_expected) + assert np.array_equal(secondary, secondary_expected) + + # range data has 2 bits per vector, primary and then secondary in sequence. 
+ # It excludes the first vector, making the length (primary_count - 1) * 2 + range_primary = "000000000000000101101011111111" + + # This includes the first range value and all the ranges from range_primary + expected_range_primary = [3, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3] + range_secondary = "000000000000000101101011111101" + expected_range_secondary = [3, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 1] + # 16 bit width with range section + headers = "01000010" + input_data = np.array( + [ + int(i) + for i in headers + + primary_compressed + + secondary_compressed + + padding + + range_primary + + range_secondary + ], + dtype=np.uint8, + ) + + # In this step, input_data is automatically padded to a byte boundary by adding + # zeros to the end + input_data = np.packbits(input_data) + + for i in range(len(expected_range_primary)): + primary_expected[i][3] = expected_range_primary[i] + secondary_expected[i][3] = expected_range_secondary[i] + + (primary_with_range, secondary_with_range) = MagL1a.process_compressed_vectors( + input_data, 16, 16 + ) + + assert primary_with_range.shape[0] == 16 + assert secondary_with_range.shape[0] == 16 + + assert np.array_equal(primary_with_range, primary_expected) + assert np.array_equal(secondary_with_range, secondary_expected) + + +def test_switch_to_uncompressed_vector_data( + expected_vectors, uncompressed_vector_bytearray +): + primary_compressed = ( + "000000100000010000001000000100000001000000100000110101110010" + "011100101011010111001001110010101101011100100110000000011100" + "011100100111001010110101100001011000000001101011100100111001" + "010111000111001001110010101101011100100110000000011010111001" + "001110010101101011100100111001010111000110000101100000000110" + "101110010011100101011010111001001110010101110001110010011000" + "000001100000000000000000000010111000000000000000000000010011" + "000000000000000000000000100101011" + ) + + # 4 uncompressed vectors from uncompressed_vector_bytearray + uncompressed_bits = ( + "000000100000010000001000000100000001000000100000" + "000000100000011100001000000111010001000000111010" + "000000100000101000001000001010100001000001010100" + "000000100000110100001000001101110001000001101111" + ) + + secondary_compressed = ( + "0000001000000011000010000000111100010000000111111110001110" + "0100111001010110101100001011000000001101011100100111001010" + "1110001110010011100101011010111001001100000000110101110010" + "0111001010110101110010011100101011100011000010110000000011" + "0101110010011100101011010111001001100000000111000111001001" + "1100101011010111001001110010101101011000010110000000011100" + "0111001001110010101100000000000000101110000000000000000000" + "00010011100101000000000011" + ) + + uncompressed_expected_vectors = expected_vectors[0][:4] + + headers = "01000000" + + input_data = np.array( + [ + int(i) + for i in headers + + primary_compressed + + uncompressed_bits + + secondary_compressed + + uncompressed_bits + ], + dtype=np.uint8, + ) + + input_data = np.packbits(input_data) + (primary, secondary) = MagL1a.process_compressed_vectors(input_data, 20, 20) + + # The 16th compressed vector is bad because it needs to be >60 bits + assert np.array_equal(primary[:15], expected_vectors[0][:-1]) + assert np.array_equal(primary[16:], uncompressed_expected_vectors) + + assert np.array_equal(secondary[:15], expected_vectors[1][:-1]) + assert np.array_equal(secondary[16:], uncompressed_expected_vectors) + + # Test if first primary vector is too long + primary_first_vector = 
"00000010000001000000100000010000000100000010000011" + primary_long_second_vector = ( + "0000000000000000000001011100000000000000000000001" + "0011000000000000000000000000100101011" + ) + + input_data = np.array( + [ + int(i) + for i in headers + + primary_first_vector + + primary_long_second_vector + + uncompressed_bits + + secondary_compressed + ], + dtype=np.uint8, + ) + input_data = np.packbits(input_data) + + (primary, secondary) = MagL1a.process_compressed_vectors(input_data, 6, 16) + assert len(primary) == 6 + assert np.array_equal(primary[0], expected_vectors[0][0]) + assert np.array_equal(primary[2:], uncompressed_expected_vectors) + + +def test_different_compression_width(raw_compressed_vectors): + # Compression headers - indicating a 12 bit width and no range section + headers = "00110000" + + first_primary_vector = "00100000010010000001000000000010000011" + first_secondary_vector = "00000001011000000000000011111111111101" + + primary_compressed = raw_compressed_vectors[0] + secondary_compressed = raw_compressed_vectors[1] + + expected_first_vector = [516, -2032, 32, 3] + expected_second_vector = [22, 0, -1, 1] + + padding = "00000" # Pad to byte boundary + + input_data = np.array( + [ + int(i) + for i in headers + + first_primary_vector + + primary_compressed + + first_secondary_vector + + secondary_compressed + + padding + ], + dtype=np.uint8, + ) + + input_data = np.packbits(input_data) + (primary, secondary) = MagL1a.process_compressed_vectors(input_data, 16, 16) + + assert np.array_equal(primary[0], expected_first_vector) + assert np.array_equal(secondary[0], expected_second_vector) + + assert sum(primary[-1]) != 0 + assert sum(secondary[-1]) != 0 + + assert len(primary) == 16 + assert len(secondary) == 16 + + +def test_real_uncompressed_vector_data(uncompressed_vector_bytearray, expected_vectors): + primary_expected = expected_vectors[0] + secondary_expected = expected_vectors[1] + + (primary, secondary) = MagL1a.process_uncompressed_vectors( + uncompressed_vector_bytearray, 16, 16 + ) + assert np.array_equal(primary_expected, primary) + assert np.array_equal(secondary_expected, secondary) + + +def test_accumulate_vectors(): + range = 4 + start_vector = np.array([1, 2, 3, range], dtype=np.uint) + + diff_vectors = [1, 1, 1, 3, 0, -3, -1, -10, 1] + + expected_vectors = np.array( + [[1, 2, 3, range], [2, 3, 4, range], [5, 3, 1, range], [4, -7, 2, range]] + ) + + test_vectors = MagL1a.convert_diffs_to_vectors(start_vector, diff_vectors, 4) + + assert np.array_equal(test_vectors, expected_vectors) + + +sixteen_bits = "00000010000001000000100000010000000100000010000011" +twelve_bits = "000000010110000000000000111111111111" +eighteenbits = "000000100000010010000010000001000011000100000010000001" +twentybits = "00010100000010000001001000001000000100001100010000001000000101" + + +@pytest.mark.parametrize( + "vector_string, expected_vectors, width, include_range", + [ + (sixteen_bits, [516, 2064, 4128, 3], 16, 1), + (twelve_bits, [22, 0, -1, 0], 12, 0), + (eighteenbits, [2066, 8259, 16513, 0], 18, 0), + (twentybits, [82049, 133136, -245631, 1], 20, 1), + ], + ids=["16bit", "12bit", "18bit", "20bit"], +) +def test_unpack_one_vector( + vector_string, expected_vectors, uncompressed_vector_bytearray, width, include_range +): + test_vector = np.array([int(i) for i in vector_string], dtype=np.uint8) + test_output = MagL1a.unpack_one_vector(test_vector, width, include_range) + assert all(test_output == expected_vectors) + + +def test_twos_complement(): + # -19 in binary + input_test = 
np.array([1, 1, 1, 0, 1, 1, 0, 1], dtype=np.uint8) + input_test_uint = np.packbits(input_test) + + twos_complement = MagL1a.twos_complement(input_test_uint, 8) + assert twos_complement == -19 + assert twos_complement.dtype == np.int32 + + # In 12 bits, the number is 237 + twos_complement = MagL1a.twos_complement(input_test_uint, 12) + assert twos_complement == 237 + + # Higher bit number + # -19001 in 16 bits + input_test = np.array( + [1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8 + ) + input_test_uint = np.packbits(input_test) + twos_complement = MagL1a.twos_complement(input_test_uint, 16) + + assert twos_complement == -19001 + + +def test_decode_fib_zig_zag(): + test_values = np.array([1, 0, 0, 1, 0, 0, 1, 1]) + assert MagL1a.decode_fib_zig_zag(test_values) == 13 + + test_values = np.array([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1]) + assert MagL1a.decode_fib_zig_zag(test_values) == -138 + + +def test_process_uncompressed_vector_data(): expected_vector_data = [[1001, 1002, -3001, 3], [2001, -2002, -3333, 1]] # 100 bits, created by hand by appending all bits from expected_vector_data into one @@ -55,13 +730,12 @@ def test_process_vector_data(): # values) hex_string = "03E903EAF447C1F47E0BBCBED0" input_data = np.frombuffer(bytes.fromhex(hex_string), dtype=np.dtype(">b")) - total_primary_vectors = 1 total_secondary_vectors = 1 # 36 bytes (primary_vectors, secondary_vectors) = MagL1a.process_vector_data( - input_data, total_primary_vectors, total_secondary_vectors + input_data, total_primary_vectors, total_secondary_vectors, 0 ) assert primary_vectors[0][0] == expected_vector_data[0][0] diff --git a/poetry.lock b/poetry.lock index 83c5479d1..4a029cde5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
[[package]] name = "accessible-pygments" @@ -1143,7 +1143,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1151,16 +1150,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1177,7 +1168,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1185,7 +1175,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1747,4 +1736,4 @@ tools = ["openpyxl", "pandas"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "f750033025b765826c827adb1142fd59444e13a985755c2a6efc3a207348a959" +content-hash = "9c2a79266b89746935e7c90fa12f578ad753b9c3d390ce8e5f7dcba9ddcd953c" diff --git a/pyproject.toml b/pyproject.toml index d56531883..86df715c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,7 @@ pytest-cov = {version="^4.0.0", optional=true} ruff = {version="==0.2.1", optional=true} sphinx = {version="*", optional=true} sphinxcontrib-openapi = {version="^0.8.3", optional=true} -mypy = {version="^1.10.1", optional=true} +mypy = {version="1.10.1", optional=true} [tool.poetry.extras] dev = ["pre-commit", "ruff", "mypy"]