diff --git a/src/soundevent/arrays/dimensions.py b/src/soundevent/arrays/dimensions.py
index 203067e..b877b2e 100644
--- a/src/soundevent/arrays/dimensions.py
+++ b/src/soundevent/arrays/dimensions.py
@@ -493,9 +493,7 @@ def get_dim_step(
         return attrs[DimAttrs.step.value]
 
     if not estimate_step:
-        raise ValueError(
-            f"Step size not found in the '{dim}' dimension attributes."
-        )
+        raise ValueError(f"Step size not found in the '{dim}' dimension attributes.")
 
     return estimate_dim_step(
         coord.data,
diff --git a/src/soundevent/arrays/operations.py b/src/soundevent/arrays/operations.py
index f01ac97..ba5a226 100644
--- a/src/soundevent/arrays/operations.py
+++ b/src/soundevent/arrays/operations.py
@@ -7,11 +7,7 @@
 from numpy.typing import DTypeLike
 from xarray.core.types import InterpOptions
 
-from soundevent.arrays.dimensions import (
-    create_range_dim,
-    get_dim_range,
-    get_dim_step,
-)
+from soundevent.arrays.dimensions import create_range_dim, get_dim_range, get_dim_step
 
 __all__ = [
     "center",
@@ -92,9 +88,7 @@ def crop_dim(
         stop = current_stop
 
     if start > stop:
-        raise ValueError(
-            f"Start value {start} must be less than stop value {stop}"
-        )
+        raise ValueError(f"Start value {start} must be less than stop value {stop}")
 
     if start < current_start or stop > current_stop:
         raise ValueError(
@@ -180,9 +174,7 @@ def extend_dim(
         stop = current_stop
 
     if start > stop:
-        raise ValueError(
-            f"Start value {start} must be less than stop value {stop}"
-        )
+        raise ValueError(f"Start value {start} must be less than stop value {stop}")
 
     step = get_dim_step(arr, dim)
 
@@ -312,9 +304,7 @@ def set_value_at_pos(
     start, stop = get_dim_range(array, dim)
 
     if coord < start or coord > stop:
-        raise KeyError(
-            f"Position {coord} is outside the range of dimension {dim}."
-        )
+        raise KeyError(f"Position {coord} is outside the range of dimension {dim}.")
 
     index = array.indexes[dim].get_slice_bound(coord, "right")
     indexer[dims[dim]] = index - 1
diff --git a/src/soundevent/audio/chunks.py b/src/soundevent/audio/chunks.py
index 3f2b2d5..975e8bb 100644
--- a/src/soundevent/audio/chunks.py
+++ b/src/soundevent/audio/chunks.py
@@ -111,8 +111,7 @@ def _read_chunk(riff: BinaryIO) -> Optional[Chunk]:
 
     if chunk_id in CHUNKS_WITH_SUBCHUNKS:
         chunk.subchunks = {
-            subchunk.chunk_id: subchunk
-            for subchunk in _get_subchunks(riff, size - 4)
+            subchunk.chunk_id: subchunk for subchunk in _get_subchunks(riff, size - 4)
         }
     else:
         riff.seek(size, os.SEEK_CUR)
diff --git a/src/soundevent/audio/filter.py b/src/soundevent/audio/filter.py
index 6df6111..63ff040 100644
--- a/src/soundevent/audio/filter.py
+++ b/src/soundevent/audio/filter.py
@@ -18,9 +18,7 @@ def _get_filter(
     order: int = 5,
 ) -> np.ndarray:
     if low_freq is None and high_freq is None:
-        raise ValueError(
-            "At least one of low_freq and high_freq must be specified."
-        )
+        raise ValueError("At least one of low_freq and high_freq must be specified.")
 
     if low_freq is None:
         # Low pass filter
diff --git a/src/soundevent/audio/media_info.py b/src/soundevent/audio/media_info.py
index e3ac9e6..6e44bd8 100644
--- a/src/soundevent/audio/media_info.py
+++ b/src/soundevent/audio/media_info.py
@@ -156,9 +156,7 @@ def get_media_info(path: PathLike) -> MediaInfo:
     # chunk is the size of the data subchunk divided by the number
     # of channels and the bit depth.
     data_chunk = chunk.subchunks["data"]
-    samples = (
-        8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth)
-    )
+    samples = 8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth)
     duration = samples / fmt_info.samplerate
 
     return MediaInfo(
diff --git a/src/soundevent/audio/spectrum.py b/src/soundevent/audio/spectrum.py
index 57a4738..3ab9f40 100644
--- a/src/soundevent/audio/spectrum.py
+++ b/src/soundevent/audio/spectrum.py
@@ -120,9 +120,7 @@ def pcen_core(
         raise ValueError(f"eps={eps} must be strictly positive")
 
     if time_constant <= 0:
-        raise ValueError(
-            f"time_constant={time_constant} must be strictly positive"
-        )
+        raise ValueError(f"time_constant={time_constant} must be strictly positive")
 
     if b is None:
         t_frames = time_constant * sr / float(hop_length)
@@ -146,9 +144,7 @@
     if max_size == 1:
         ref = S
     elif S.ndim == 1:
-        raise ValueError(
-            "Max-filtering cannot be applied to 1-dimensional input"
-        )
+        raise ValueError("Max-filtering cannot be applied to 1-dimensional input")
     else:
         if max_axis is None:
             if S.ndim != 2:
diff --git a/src/soundevent/data/annotation_sets.py b/src/soundevent/data/annotation_sets.py
index 6ea30e0..7273282 100644
--- a/src/soundevent/data/annotation_sets.py
+++ b/src/soundevent/data/annotation_sets.py
@@ -28,6 +28,4 @@ class AnnotationSet(BaseModel):
         default_factory=list,
         repr=False,
     )
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
diff --git a/src/soundevent/data/annotation_tasks.py b/src/soundevent/data/annotation_tasks.py
index 3c9495c..31ad438 100644
--- a/src/soundevent/data/annotation_tasks.py
+++ b/src/soundevent/data/annotation_tasks.py
@@ -60,15 +60,11 @@ class StatusBadge(BaseModel):
     state: AnnotationState
     owner: Optional[User] = None
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
 
 
 class AnnotationTask(BaseModel):
     uuid: UUID = Field(default_factory=uuid4, repr=False)
     clip: Clip
     status_badges: List[StatusBadge] = Field(default_factory=list)
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
diff --git a/src/soundevent/data/clip_annotations.py b/src/soundevent/data/clip_annotations.py
index f8d4686..acadefe 100644
--- a/src/soundevent/data/clip_annotations.py
+++ b/src/soundevent/data/clip_annotations.py
@@ -54,6 +54,4 @@ class ClipAnnotation(BaseModel):
     sequences: List[SequenceAnnotation] = Field(default_factory=list)
     tags: List[Tag] = Field(default_factory=list)
     notes: List[Note] = Field(default_factory=list)
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
diff --git a/src/soundevent/data/clip_evaluations.py b/src/soundevent/data/clip_evaluations.py
index ad812b0..380a9c8 100644
--- a/src/soundevent/data/clip_evaluations.py
+++ b/src/soundevent/data/clip_evaluations.py
@@ -95,17 +95,13 @@ def _check_matches(self):
         }
 
         match_targets = [
-            match.target.uuid
-            for match in self.matches
-            if match.target is not None
+            match.target.uuid for match in self.matches if match.target is not None
         ]
         match_targets_set = set(match_targets)
 
         match_sources = [
-            match.source.uuid
-            for match in self.matches
-            if match.source is not None
+            match.source.uuid for match in self.matches if match.source is not None
         ]
         match_sources_set = set(match_sources)
diff --git a/src/soundevent/data/evaluations.py b/src/soundevent/data/evaluations.py
index d55db26..4ef34ed 100644
--- a/src/soundevent/data/evaluations.py
+++ b/src/soundevent/data/evaluations.py
@@ -25,9 +25,7 @@ class Evaluation(BaseModel):
     """Evaluation Class."""
 
     uuid: UUID = Field(default_factory=uuid4, repr=False)
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
     evaluation_task: str
     clip_evaluations: Sequence[ClipEvaluation] = Field(default_factory=list)
     metrics: Sequence[Feature] = Field(default_factory=list)
diff --git a/src/soundevent/data/geometries.py b/src/soundevent/data/geometries.py
index 35b8c37..be5af2c 100644
--- a/src/soundevent/data/geometries.py
+++ b/src/soundevent/data/geometries.py
@@ -252,9 +252,7 @@ def _validate_time_interval(cls, v: List[Time]) -> List[Time]:
         after the end time).
         """
         if len(v) != 2:
-            raise ValueError(
-                "The time interval must have exactly two time stamps."
-            )
+            raise ValueError("The time interval must have exactly two time stamps.")
 
         if v[0] > v[1]:
             raise ValueError("The start time must be before the end time.")
@@ -325,9 +323,7 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]:
             raise ValueError("The time must be positive.")
 
         if frequency < 0 or frequency > MAX_FREQUENCY:
-            raise ValueError(
-                f"The frequency must be between 0 and {MAX_FREQUENCY}."
-            )
+            raise ValueError(f"The frequency must be between 0 and {MAX_FREQUENCY}.")
 
         return v
 
@@ -473,8 +469,7 @@ def _validate_coordinates(
         if frequency < 0 or frequency > MAX_FREQUENCY:
             raise ValueError(
-                f"The frequency must be between 0 and "
-                f"{MAX_FREQUENCY}."
+                f"The frequency must be between 0 and " f"{MAX_FREQUENCY}."
             )
 
         return v
@@ -532,9 +527,7 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]:
         negative or the frequency is outside the valid range).
         """
         if len(v) != 4:
-            raise ValueError(
-                "The bounding box must have exactly four coordinates."
-            )
+            raise ValueError("The bounding box must have exactly four coordinates.")
 
         start_time, low_freq, end_time, high_freq = v
 
@@ -558,9 +551,7 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]:
             raise ValueError("The start time must be before the end time.")
 
         if low_freq > high_freq:
-            raise ValueError(
-                "The start frequency must be before the end frequency."
-            )
+            raise ValueError("The start frequency must be before the end frequency.")
 
         return v
 
@@ -771,9 +762,7 @@ def _validate_coordinates(
         negative or the frequency is outside the valid range).
         """
         if len(v) < 1:
-            raise ValueError(
-                "The multipolygon must have at least one polygon."
-            )
+            raise ValueError("The multipolygon must have at least one polygon.")
 
         for polygon in v:
             if len(polygon) < 1:
@@ -781,9 +770,7 @@ def _validate_coordinates(
 
             for ring in polygon:
                 if len(ring) < 3:
-                    raise ValueError(
-                        "Each ring must have at least three points."
-                    )
+                    raise ValueError("Each ring must have at least three points.")
 
                 for time, frequency in ring:
                     if time < 0:
@@ -791,8 +778,7 @@ def _validate_coordinates(
 
                     if frequency < 0 or frequency > MAX_FREQUENCY:
                         raise ValueError(
-                            f"The frequency must be between 0 and "
-                            f"{MAX_FREQUENCY}."
+                            f"The frequency must be between 0 and " f"{MAX_FREQUENCY}."
                         )
 
         return v
@@ -921,6 +907,4 @@ def geometry_validate(
             from_attributes=mode == "attributes",
         )
     except ValidationError as error:
-        raise ValueError(
-            f"Object {obj} is not a valid {geom_type}."
-        ) from error
+        raise ValueError(f"Object {obj} is not a valid {geom_type}.") from error
diff --git a/src/soundevent/data/notes.py b/src/soundevent/data/notes.py
index de0de81..3ee2de6 100644
--- a/src/soundevent/data/notes.py
+++ b/src/soundevent/data/notes.py
@@ -95,9 +95,7 @@ class Note(BaseModel):
     message: str
     created_by: Optional[User] = None
     is_issue: bool = False
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
 
     def __hash__(self):
         """Hash the Note object."""
diff --git a/src/soundevent/data/prediction_sets.py b/src/soundevent/data/prediction_sets.py
index edffd2a..1cf3bd6 100644
--- a/src/soundevent/data/prediction_sets.py
+++ b/src/soundevent/data/prediction_sets.py
@@ -73,6 +73,4 @@ class PredictionSet(BaseModel):
 
     uuid: UUID = Field(default_factory=uuid4)
     clip_predictions: List[ClipPrediction] = Field(default_factory=list)
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
diff --git a/src/soundevent/data/recording_sets.py b/src/soundevent/data/recording_sets.py
index dde95ef..cb1ff5f 100644
--- a/src/soundevent/data/recording_sets.py
+++ b/src/soundevent/data/recording_sets.py
@@ -12,6 +12,4 @@ class RecordingSet(BaseModel):
 
     uuid: UUID = Field(default_factory=uuid4)
     recordings: List[Recording] = Field(default_factory=list, repr=False)
-    created_on: datetime.datetime = Field(
-        default_factory=datetime.datetime.now
-    )
+    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
diff --git a/src/soundevent/data/recordings.py b/src/soundevent/data/recordings.py
index 088d23a..b2cbb7c 100644
--- a/src/soundevent/data/recordings.py
+++ b/src/soundevent/data/recordings.py
@@ -193,10 +193,7 @@ def from_file(
         Recording
             The recording object.
""" - from soundevent.audio.media_info import ( - compute_md5_checksum, - get_media_info, - ) + from soundevent.audio.media_info import compute_md5_checksum, get_media_info media_info = get_media_info(path) diff --git a/src/soundevent/evaluation/affinity.py b/src/soundevent/evaluation/affinity.py index 801e836..1de83f1 100644 --- a/src/soundevent/evaluation/affinity.py +++ b/src/soundevent/evaluation/affinity.py @@ -1,11 +1,7 @@ """Measures of affinity between sound events geometries.""" from soundevent import data -from soundevent.geometry import ( - buffer_geometry, - compute_bounds, - geometry_to_shapely, -) +from soundevent.geometry import buffer_geometry, compute_bounds, geometry_to_shapely __all__ = [ "compute_affinity", @@ -88,10 +84,7 @@ def compute_affinity( geometry1 = _prepare_geometry(geometry1, time_buffer, freq_buffer) geometry2 = _prepare_geometry(geometry2, time_buffer, freq_buffer) - if ( - geometry1.type in TIME_GEOMETRY_TYPES - or geometry2.type in TIME_GEOMETRY_TYPES - ): + if geometry1.type in TIME_GEOMETRY_TYPES or geometry2.type in TIME_GEOMETRY_TYPES: return compute_affinity_in_time(geometry1, geometry2) shp1 = geometry_to_shapely(geometry1) @@ -114,12 +107,8 @@ def compute_affinity_in_time( start_time1, _, end_time1, _ = compute_bounds(geometry1) start_time2, _, end_time2, _ = compute_bounds(geometry2) - intersection = max( - 0, min(end_time1, end_time2) - max(start_time1, start_time2) - ) - union = ( - (end_time1 - start_time1) + (end_time2 - start_time2) - intersection - ) + intersection = max(0, min(end_time1, end_time2) - max(start_time1, start_time2)) + union = (end_time1 - start_time1) + (end_time2 - start_time2) - intersection if union == 0: return 0 diff --git a/src/soundevent/evaluation/metrics.py b/src/soundevent/evaluation/metrics.py index 60dc829..22e3c71 100644 --- a/src/soundevent/evaluation/metrics.py +++ b/src/soundevent/evaluation/metrics.py @@ -43,9 +43,7 @@ def balanced_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array( - [y if y is not None else num_classes for y in y_true] - ) + y_true_array = np.array([y if y is not None else num_classes for y in y_true]) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.balanced_accuracy_score( @@ -59,9 +57,7 @@ def accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array( - [y if y is not None else num_classes for y in y_true] - ) + y_true_array = np.array([y if y is not None else num_classes for y in y_true]) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.accuracy_score( # type: ignore @@ -75,9 +71,7 @@ def top_3_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array( - [y if y is not None else num_classes for y in y_true] - ) + y_true_array = np.array([y if y is not None else num_classes for y in y_true]) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] return metrics.top_k_accuracy_score( # type: ignore y_true=y_true_array, diff --git a/src/soundevent/evaluation/tasks/__init__.py b/src/soundevent/evaluation/tasks/__init__.py index bc14174..0d1b3b3 100644 --- a/src/soundevent/evaluation/tasks/__init__.py +++ b/src/soundevent/evaluation/tasks/__init__.py @@ -5,9 +5,7 @@ from soundevent.evaluation.tasks.sound_event_classification import ( sound_event_classification, ) -from soundevent.evaluation.tasks.sound_event_detection import ( 
-    sound_event_detection,
-)
+from soundevent.evaluation.tasks.sound_event_detection import sound_event_detection
 
 __all__ = [
     "clip_classification",
diff --git a/src/soundevent/evaluation/tasks/clip_classification.py b/src/soundevent/evaluation/tasks/clip_classification.py
index 814052a..aa509ea 100644
--- a/src/soundevent/evaluation/tasks/clip_classification.py
+++ b/src/soundevent/evaluation/tasks/clip_classification.py
@@ -164,8 +164,6 @@ def _compute_overall_score(
     evaluated_examples: Sequence[data.ClipEvaluation],
 ) -> float:
     non_none_scores = [
-        example.score
-        for example in evaluated_examples
-        if example.score is not None
+        example.score for example in evaluated_examples if example.score is not None
     ]
     return float(np.mean(non_none_scores)) if non_none_scores else 0.0
diff --git a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py
index c9f23a5..6cc3249 100644
--- a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py
+++ b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py
@@ -166,8 +166,6 @@ def _compute_overall_score(
     evaluated_examples: Sequence[data.ClipEvaluation],
 ) -> float:
     valid_scores = [
-        example.score
-        for example in evaluated_examples
-        if example.score is not None
+        example.score for example in evaluated_examples if example.score is not None
     ]
     return float(np.mean(valid_scores)) if valid_scores else 0.0
diff --git a/src/soundevent/evaluation/tasks/common.py b/src/soundevent/evaluation/tasks/common.py
index 0e2cb58..732f547 100644
--- a/src/soundevent/evaluation/tasks/common.py
+++ b/src/soundevent/evaluation/tasks/common.py
@@ -7,9 +7,7 @@ def iterate_over_valid_clips(
     clip_predictions: Sequence[data.ClipPrediction],
     clip_annotations: Sequence[data.ClipAnnotation],
 ) -> Iterable[Tuple[data.ClipAnnotation, data.ClipPrediction]]:
-    annotated_clips = {
-        example.clip.uuid: example for example in clip_annotations
-    }
+    annotated_clips = {example.clip.uuid: example for example in clip_annotations}
 
     for predictions in clip_predictions:
         if predictions.clip.uuid in annotated_clips:
diff --git a/src/soundevent/evaluation/tasks/sound_event_classification.py b/src/soundevent/evaluation/tasks/sound_event_classification.py
index c860467..029817c 100644
--- a/src/soundevent/evaluation/tasks/sound_event_classification.py
+++ b/src/soundevent/evaluation/tasks/sound_event_classification.py
@@ -18,9 +18,7 @@
     "sound_event_classification",
 ]
 
-SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (
-    metrics.true_class_probability,
-)
+SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,)
 
 EXAMPLE_METRICS: Sequence[metrics.Metric] = ()
 
@@ -121,9 +119,7 @@ def _evaluate_clip(
         if sound_event_prediction.sound_event.uuid not in _valid_sound_events:
             continue
 
-        annotation = _valid_sound_events[
-            sound_event_prediction.sound_event.uuid
-        ]
+        annotation = _valid_sound_events[sound_event_prediction.sound_event.uuid]
         true_class, predicted_classes, match = _evaluate_sound_event(
             sound_event_prediction=sound_event_prediction,
             sound_event_annotation=annotation,
@@ -134,9 +130,7 @@ def _evaluate_clip(
         predicted_classes_scores.append(predicted_classes)
         matches.append(match)
 
-    score = np.mean(
-        [match.score for match in matches if match.score is not None]
-    )
+    score = np.mean([match.score for match in matches if match.score is not None])
 
     return (
         true_classes,
@@ -193,8 +187,6 @@ def _compute_overall_score(
     evaluated_clip: Sequence[data.ClipEvaluation],
 ) -> float:
     non_none_scores = [
-        example.score
-        for example in evaluated_clip
-        if example.score is not None
+        example.score for example in evaluated_clip if example.score is not None
     ]
     return float(np.mean(non_none_scores)) if non_none_scores else 0.0
diff --git a/src/soundevent/evaluation/tasks/sound_event_detection.py b/src/soundevent/evaluation/tasks/sound_event_detection.py
index a841e55..09a3ba9 100644
--- a/src/soundevent/evaluation/tasks/sound_event_detection.py
+++ b/src/soundevent/evaluation/tasks/sound_event_detection.py
@@ -20,9 +20,7 @@
     "evaluate_clip",
 ]
 
-SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (
-    metrics.true_class_probability,
-)
+SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,)
 
 EXAMPLE_METRICS: Sequence[metrics.Metric] = ()
diff --git a/src/soundevent/geometry/__init__.py b/src/soundevent/geometry/__init__.py
index 7342d11..79c7b10 100644
--- a/src/soundevent/geometry/__init__.py
+++ b/src/soundevent/geometry/__init__.py
@@ -14,10 +14,7 @@
 """
 
 from soundevent.geometry.conversion import geometry_to_shapely
-from soundevent.geometry.features import (
-    GeometricFeature,
-    compute_geometric_features,
-)
+from soundevent.geometry.features import GeometricFeature, compute_geometric_features
 from soundevent.geometry.html import geometry_to_html
 from soundevent.geometry.operations import buffer_geometry, compute_bounds
 from soundevent.geometry.positions import get_geometry_point
diff --git a/src/soundevent/geometry/features.py b/src/soundevent/geometry/features.py
index a251bfe..87b7dd3 100644
--- a/src/soundevent/geometry/features.py
+++ b/src/soundevent/geometry/features.py
@@ -154,9 +154,7 @@ def _compute_multi_point_features(
         Feature(name=GeometricFeature.LOW_FREQ, value=low_freq),
         Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq),
         Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq),
-        Feature(
-            name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)
-        ),
+        Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)),
     ]
 
 
@@ -171,9 +169,7 @@ def _compute_multi_linestring_features(
         Feature(name=GeometricFeature.LOW_FREQ, value=low_freq),
         Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq),
         Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq),
-        Feature(
-            name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)
-        ),
+        Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)),
     ]
 
 
@@ -188,15 +184,11 @@ def _compute_multi_polygon_features(
         Feature(name=GeometricFeature.LOW_FREQ, value=low_freq),
         Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq),
         Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq),
-        Feature(
-            name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)
-        ),
+        Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)),
     ]
 
 
-_COMPUTE_FEATURES: Dict[
-    geometries.GeometryType, Callable[[Any], List[Feature]]
-] = {
+_COMPUTE_FEATURES: Dict[geometries.GeometryType, Callable[[Any], List[Feature]]] = {
     geometries.TimeStamp.geom_type(): _compute_time_stamp_features,
     geometries.TimeInterval.geom_type(): _compute_time_interval_features,
     geometries.BoundingBox.geom_type(): _compute_bounding_box_features,
diff --git a/src/soundevent/geometry/html.py b/src/soundevent/geometry/html.py
index e705c85..7554044 100644
--- a/src/soundevent/geometry/html.py
+++ b/src/soundevent/geometry/html.py
@@ -105,11 +105,7 @@ def axis_label(
     inner_style = "; ".join(
         [
             "display: inline",
-            (
-                "vertical-align: top"
- if axis == "time" - else "vertical-align: bottom" - ), + ("vertical-align: top" if axis == "time" else "vertical-align: bottom"), ] ) diff --git a/src/soundevent/geometry/positions.py b/src/soundevent/geometry/positions.py index 6997ee9..b371ca2 100644 --- a/src/soundevent/geometry/positions.py +++ b/src/soundevent/geometry/positions.py @@ -1,10 +1,10 @@ -from typing import Tuple, Literal +from typing import Literal, Tuple + import shapely from soundevent.data import Geometry -from soundevent.geometry.operations import compute_bounds from soundevent.geometry.conversion import geometry_to_shapely - +from soundevent.geometry.operations import compute_bounds __all__ = [ "get_geometry_point", diff --git a/src/soundevent/io/aoef/__init__.py b/src/soundevent/io/aoef/__init__.py index 08659c8..1aed5d9 100644 --- a/src/soundevent/io/aoef/__init__.py +++ b/src/soundevent/io/aoef/__init__.py @@ -34,10 +34,7 @@ from soundevent import data from soundevent.io.types import DataCollections, DataType -from .annotation_project import ( - AnnotationProjectAdapter, - AnnotationProjectObject, -) +from .annotation_project import AnnotationProjectAdapter, AnnotationProjectObject from .annotation_set import AnnotationSetAdapter, AnnotationSetObject from .dataset import DatasetAdapter, DatasetObject from .evaluation import EvaluationAdapter, EvaluationObject @@ -87,9 +84,7 @@ class AOEFObject(BaseModel): """Schema definition for an AOEF object.""" version: str = AOEF_VERSION - created_on: datetime.datetime = Field( - default_factory=datetime.datetime.now - ) + created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) data: Union[ EvaluationObject, DatasetObject, @@ -162,9 +157,7 @@ def load( if aoef_object.version != AOEF_VERSION: version = aoef_object.version - raise ValueError( - f"Invalid AOEF version: {version} (expected {AOEF_VERSION})" - ) + raise ValueError(f"Invalid AOEF version: {version} (expected {AOEF_VERSION})") return to_soundevent(aoef_object, audio_dir=audio_dir) diff --git a/src/soundevent/io/aoef/adapters.py b/src/soundevent/io/aoef/adapters.py index ac21ca3..1ea7920 100644 --- a/src/soundevent/io/aoef/adapters.py +++ b/src/soundevent/io/aoef/adapters.py @@ -47,9 +47,7 @@ def to_aoef(self, obj: C) -> D: ... def to_soundevent(self, obj: D) -> C: ... -class DataAdapter( - ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey] -): +class DataAdapter(ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey]): """Base class for data adapters. A data adapter is used to convert between sound event and AOEF data @@ -66,9 +64,7 @@ def __init__(self): self._aoef_store: Dict[AOEFKey, AOEFObject] = {} @abstractmethod - def assemble_aoef( - self, obj: SoundEventObject, obj_id: AOEFKey - ) -> AOEFObject: + def assemble_aoef(self, obj: SoundEventObject, obj_id: AOEFKey) -> AOEFObject: """Create AOEF object from sound event object. 
 
         Parameters
diff --git a/src/soundevent/io/aoef/annotation_project.py b/src/soundevent/io/aoef/annotation_project.py
index ef630b9..5046f5e 100644
--- a/src/soundevent/io/aoef/annotation_project.py
+++ b/src/soundevent/io/aoef/annotation_project.py
@@ -26,26 +26,18 @@ def __init__(
         **kwargs,
     ):
         super().__init__(**kwargs)
-        self.annotation_task_adapter = (
-            annotation_task_adapter
-            or AnnotationTaskAdapter(
-                self.clip_adapter,
-                self.user_adapter,
-            )
+        self.annotation_task_adapter = annotation_task_adapter or AnnotationTaskAdapter(
+            self.clip_adapter,
+            self.user_adapter,
         )
 
     def to_aoef(  # type: ignore
         self,
         obj: data.AnnotationProject,  # type: ignore
     ) -> AnnotationProjectObject:
-        tasks = [
-            self.annotation_task_adapter.to_aoef(task)
-            for task in obj.tasks or []
-        ]
+        tasks = [self.annotation_task_adapter.to_aoef(task) for task in obj.tasks or []]
 
-        project_tags = [
-            self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags
-        ]
+        project_tags = [self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags]
 
         annotation_set = super().to_aoef(obj)
 
@@ -75,16 +67,11 @@ def to_soundevent(  # type: ignore
         annotation_set = super().to_soundevent(obj)
 
         tasks = [
-            self.annotation_task_adapter.to_soundevent(task)
-            for task in obj.tasks or []
+            self.annotation_task_adapter.to_soundevent(task) for task in obj.tasks or []
         ]
 
         return data.AnnotationProject(
-            **{
-                field: value
-                for field, value in annotation_set
-                if value is not None
-            },
+            **{field: value for field, value in annotation_set if value is not None},
             tasks=tasks,
             name=obj.name,
             description=obj.description,
diff --git a/src/soundevent/io/aoef/annotation_set.py b/src/soundevent/io/aoef/annotation_set.py
index 9ed43f6..3feacb2 100644
--- a/src/soundevent/io/aoef/annotation_set.py
+++ b/src/soundevent/io/aoef/annotation_set.py
@@ -11,10 +11,7 @@
 from .note import NoteAdapter
 from .recording import RecordingAdapter, RecordingObject
 from .sequence import SequenceAdapter, SequenceObject
-from .sequence_annotation import (
-    SequenceAnnotationAdapter,
-    SequenceAnnotationObject,
-)
+from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject
 from .sound_event import SoundEventAdapter, SoundEventObject
 from .sound_event_annotation import (
     SoundEventAnnotationAdapter,
@@ -50,12 +47,8 @@ def __init__(
         sound_event_adapter: Optional[SoundEventAdapter] = None,
         sequence_adapter: Optional[SequenceAdapter] = None,
         clip_adapter: Optional[ClipAdapter] = None,
-        sound_event_annotations_adapter: Optional[
-            SoundEventAnnotationAdapter
-        ] = None,
-        sequence_annotations_adapter: Optional[
-            SequenceAnnotationAdapter
-        ] = None,
+        sound_event_annotations_adapter: Optional[SoundEventAnnotationAdapter] = None,
+        sequence_annotations_adapter: Optional[SequenceAnnotationAdapter] = None,
         clip_annotation_adapter: Optional[ClipAnnotationsAdapter] = None,
     ):
         self.user_adapter = user_adapter or UserAdapter()
@@ -148,14 +141,10 @@ def to_soundevent(
             self.sequence_adapter.to_soundevent(sequence)
 
         for sound_event_annotation in obj.sound_event_annotations or []:
-            self.sound_event_annotations_adapter.to_soundevent(
-                sound_event_annotation
-            )
+            self.sound_event_annotations_adapter.to_soundevent(sound_event_annotation)
 
         for sequence_annotation in obj.sequence_annotations or []:
-            self.sequence_annotations_adapter.to_soundevent(
-                sequence_annotation
-            )
+            self.sequence_annotations_adapter.to_soundevent(sequence_annotation)
 
         annotated_clips = [
             self.clip_annotation_adapter.to_soundevent(clip_annotation)
diff --git a/src/soundevent/io/aoef/clip_annotations.py b/src/soundevent/io/aoef/clip_annotations.py
index c74cbe6..dc4cd19 100644
--- a/src/soundevent/io/aoef/clip_annotations.py
+++ b/src/soundevent/io/aoef/clip_annotations.py
@@ -59,9 +59,7 @@ def assemble_aoef(
             ),
             sound_events=(
                 [
-                    self.sound_event_annotation_adapter.to_aoef(
-                        annotation
-                    ).uuid
+                    self.sound_event_annotation_adapter.to_aoef(annotation).uuid
                     for annotation in obj.sound_events
                 ]
                 if obj.sound_events
@@ -103,25 +101,16 @@ def assemble_soundevent(
                 se_ann
                 for annotation_id in obj.sound_events or []
                 if (
-                    se_ann := self.sound_event_annotation_adapter.from_id(
-                        annotation_id
-                    )
+                    se_ann := self.sound_event_annotation_adapter.from_id(annotation_id)
                 )
                 is not None
             ],
            sequences=[
                 seq_ann
                 for annotation_id in obj.sequences or []
-                if (
-                    seq_ann := self.sequence_annotation_adapter.from_id(
-                        annotation_id
-                    )
-                )
+                if (seq_ann := self.sequence_annotation_adapter.from_id(annotation_id))
                 is not None
             ],
-            notes=[
-                self.note_adapter.to_soundevent(note)
-                for note in obj.notes or []
-            ],
+            notes=[self.note_adapter.to_soundevent(note) for note in obj.notes or []],
             created_on=obj.created_on or datetime.datetime.now(),
         )
diff --git a/src/soundevent/io/aoef/clip_evaluation.py b/src/soundevent/io/aoef/clip_evaluation.py
index 0cddee6..73fd191 100644
--- a/src/soundevent/io/aoef/clip_evaluation.py
+++ b/src/soundevent/io/aoef/clip_evaluation.py
@@ -50,10 +50,7 @@ def assemble_aoef(
             annotations=annotations.uuid,
             predictions=predictions.uuid,
             matches=(
-                [
-                    self.match_adapter.to_aoef(match).uuid
-                    for match in obj.matches
-                ]
+                [self.match_adapter.to_aoef(match).uuid for match in obj.matches]
                 if obj.matches
                 else None
             ),
@@ -73,14 +70,10 @@ def assemble_soundevent(
         predictions = self.clip_predictions_adapter.from_id(obj.predictions)
 
         if annotations is None:
-            raise ValueError(
-                f"Clip annotations with ID {obj.annotations} not found."
-            )
+            raise ValueError(f"Clip annotations with ID {obj.annotations} not found.")
 
         if predictions is None:
-            raise ValueError(
-                f"Clip predictions with ID {obj.predictions} not found."
-            )
+            raise ValueError(f"Clip predictions with ID {obj.predictions} not found.")
 
         matches = [
             match
diff --git a/src/soundevent/io/aoef/clip_predictions.py b/src/soundevent/io/aoef/clip_predictions.py
index df7dc2c..cb0c358 100644
--- a/src/soundevent/io/aoef/clip_predictions.py
+++ b/src/soundevent/io/aoef/clip_predictions.py
@@ -47,9 +47,7 @@ def assemble_aoef(
             clip=self.clip_adapter.to_aoef(obj.clip).uuid,
             sound_events=(
                 [
-                    self.sound_event_prediction_adapter.to_aoef(
-                        sound_event
-                    ).uuid
+                    self.sound_event_prediction_adapter.to_aoef(sound_event).uuid
                     for sound_event in obj.sound_events
                 ]
                 if obj.sound_events
@@ -67,8 +65,7 @@ def assemble_aoef(
                 [
                     (tag.id, predicted_tag.score)
                     for predicted_tag in obj.tags
-                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag))
-                    is not None
+                    if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None
                 ]
                 if obj.tags
                 else None
@@ -95,21 +92,13 @@ def assemble_soundevent(
             sound_events=[
                 se_pred
                 for sound_event in obj.sound_events or []
-                if (
-                    se_pred := self.sound_event_prediction_adapter.from_id(
-                        sound_event
-                    )
-                )
+                if (se_pred := self.sound_event_prediction_adapter.from_id(sound_event))
                 is not None
             ],
             sequences=[
                 seq_pred
                 for sequence in obj.sequences or []
-                if (
-                    seq_pred := self.sequence_prediction_adapter.from_id(
-                        sequence
-                    )
-                )
+                if (seq_pred := self.sequence_prediction_adapter.from_id(sequence))
                 is not None
             ],
             tags=[
diff --git a/src/soundevent/io/aoef/dataset.py b/src/soundevent/io/aoef/dataset.py
index e667bf4..c62bc59 100644
--- a/src/soundevent/io/aoef/dataset.py
+++ b/src/soundevent/io/aoef/dataset.py
@@ -35,9 +35,7 @@ def to_soundevent(  # type: ignore
     ) -> data.Dataset:
         recording_set = super().to_soundevent(obj)
         return data.Dataset(
-            **{
-                key: value for key, value in recording_set if value is not None
-            },
+            **{key: value for key, value in recording_set if value is not None},
             name=obj.name,
             description=obj.description,
         )
diff --git a/src/soundevent/io/aoef/evaluation.py b/src/soundevent/io/aoef/evaluation.py
index e53e41f..91b2dfe 100644
--- a/src/soundevent/io/aoef/evaluation.py
+++ b/src/soundevent/io/aoef/evaluation.py
@@ -14,14 +14,8 @@
 from .note import NoteAdapter
 from .recording import RecordingAdapter, RecordingObject
 from .sequence import SequenceAdapter, SequenceObject
-from .sequence_annotation import (
-    SequenceAnnotationAdapter,
-    SequenceAnnotationObject,
-)
-from .sequence_prediction import (
-    SequencePredictionAdapter,
-    SequencePredictionObject,
-)
+from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject
+from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject
 from .sound_event import SoundEventAdapter, SoundEventObject
 from .sound_event_annotation import (
     SoundEventAnnotationAdapter,
@@ -69,19 +63,11 @@ def __init__(
         sound_event_adapter: Optional[SoundEventAdapter] = None,
         sequence_adapter: Optional[SequenceAdapter] = None,
         clip_adapter: Optional[ClipAdapter] = None,
-        sound_event_annotation_adapter: Optional[
-            SoundEventAnnotationAdapter
-        ] = None,
-        sequence_annotation_adapter: Optional[
-            SequenceAnnotationAdapter
-        ] = None,
+        sound_event_annotation_adapter: Optional[SoundEventAnnotationAdapter] = None,
+        sequence_annotation_adapter: Optional[SequenceAnnotationAdapter] = None,
         clip_annotations_adapter: Optional[ClipAnnotationsAdapter] = None,
-        sound_event_prediction_adapter: Optional[
-            SoundEventPredictionAdapter
-        ] = None,
-        sequence_prediction_adapter: Optional[
-            SequencePredictionAdapter
-        ] = None,
+        sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None,
+        sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None,
         clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None,
         clip_evaluation_adapter: Optional[ClipEvaluationAdapter] = None,
         match_adapter: Optional[MatchAdapter] = None,
@@ -158,14 +144,11 @@ def __init__(
             self.sound_event_annotation_adapter,
             self.sound_event_prediction_adapter,
         )
-        self.clip_evaluation_adapter = (
-            clip_evaluation_adapter
-            or ClipEvaluationAdapter(
-                self.clip_annotations_adapter,
-                self.clip_predictions_adapter,
-                self.note_adapter,
-                self.match_adapter,
-            )
+        self.clip_evaluation_adapter = clip_evaluation_adapter or ClipEvaluationAdapter(
+            self.clip_annotations_adapter,
+            self.clip_predictions_adapter,
+            self.note_adapter,
+            self.match_adapter,
         )
 
     def to_aoef(self, obj: data.Evaluation) -> EvaluationObject:
@@ -225,9 +208,7 @@ def to_soundevent(
             self.clip_adapter.to_soundevent(clip)
 
         for sound_event_annotation in obj.sound_event_annotations or []:
-            self.sound_event_annotation_adapter.to_soundevent(
-                sound_event_annotation
-            )
+            self.sound_event_annotation_adapter.to_soundevent(sound_event_annotation)
 
         for sequence_annotation in obj.sequence_annotations or []:
             self.sequence_annotation_adapter.to_soundevent(sequence_annotation)
@@ -236,9 +217,7 @@ def to_soundevent(
             self.clip_annotations_adapter.to_soundevent(clip_annotation)
 
         for sound_event_prediction in obj.sound_event_predictions or []:
-            self.sound_event_prediction_adapter.to_soundevent(
-                sound_event_prediction
-            )
+            self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction)
 
         for sequence_prediction in obj.sequence_predictions or []:
             self.sequence_prediction_adapter.to_soundevent(sequence_prediction)
diff --git a/src/soundevent/io/aoef/evaluation_set.py b/src/soundevent/io/aoef/evaluation_set.py
index 53b9e9f..0242ed2 100644
--- a/src/soundevent/io/aoef/evaluation_set.py
+++ b/src/soundevent/io/aoef/evaluation_set.py
@@ -35,10 +35,7 @@ def to_aoef(  # type: ignore
             name=obj.name,
             description=obj.description,
             evaluation_tags=(
-                [
-                    self.tag_adapter.to_aoef(tag).id
-                    for tag in obj.evaluation_tags
-                ]
+                [self.tag_adapter.to_aoef(tag).id for tag in obj.evaluation_tags]
                 if obj.evaluation_tags
                 else None
             ),
@@ -50,11 +47,7 @@ def to_soundevent(  # type: ignore
     ) -> data.EvaluationSet:
         annotation_set = super().to_soundevent(obj)
         return data.EvaluationSet(
-            **{
-                field: value
-                for field, value in annotation_set
-                if value is not None
-            },
+            **{field: value for field, value in annotation_set if value is not None},
             name=obj.name,
             description=obj.description,
             evaluation_tags=[
diff --git a/src/soundevent/io/aoef/match.py b/src/soundevent/io/aoef/match.py
index fcc2a9a..0b76dda 100644
--- a/src/soundevent/io/aoef/match.py
+++ b/src/soundevent/io/aoef/match.py
@@ -36,16 +36,12 @@ def assemble_aoef(
     ) -> MatchObject:
         source = None
         if obj.source is not None:
-            prediction = self.sound_event_prediction_adapter.to_aoef(
-                obj.source
-            )
+            prediction = self.sound_event_prediction_adapter.to_aoef(obj.source)
             source = prediction.uuid if prediction is not None else None
 
         target = None
         if obj.target is not None:
-            annotation = self.sound_event_annotation_adapter.to_aoef(
-                obj.target
-            )
+            annotation = self.sound_event_annotation_adapter.to_aoef(obj.target)
             target = annotation.uuid if annotation is not None else None
 
         return MatchObject(
diff --git a/src/soundevent/io/aoef/prediction_set.py b/src/soundevent/io/aoef/prediction_set.py
index 2c55188..58246b3 100644
--- a/src/soundevent/io/aoef/prediction_set.py
+++ b/src/soundevent/io/aoef/prediction_set.py
@@ -11,10 +11,7 @@
 from .note import NoteAdapter
 from .recording import RecordingAdapter, RecordingObject
 from .sequence import SequenceAdapter, SequenceObject
-from .sequence_prediction import (
-    SequencePredictionAdapter,
-    SequencePredictionObject,
-)
+from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject
 from .sound_event import SoundEventAdapter, SoundEventObject
 from .sound_event_prediction import (
     SoundEventPredictionAdapter,
@@ -50,12 +47,8 @@ def __init__(
         sound_event_adapter: Optional[SoundEventAdapter] = None,
         sequence_adapter: Optional[SequenceAdapter] = None,
         clip_adapter: Optional[ClipAdapter] = None,
-        sound_event_prediction_adapter: Optional[
-            SoundEventPredictionAdapter
-        ] = None,
-        sequence_prediction_adapter: Optional[
-            SequencePredictionAdapter
-        ] = None,
+        sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None,
+        sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None,
         clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None,
     ):
         self.user_adapter = user_adapter or UserAdapter()
@@ -136,9 +129,7 @@ def to_soundevent(self, obj: PredictionSetObject) -> data.PredictionSet:
             self.clip_adapter.to_soundevent(clip)
 
         for sound_event_prediction in obj.sound_event_predictions or []:
-            self.sound_event_prediction_adapter.to_soundevent(
-                sound_event_prediction
-            )
+            self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction)
 
         for sequence_prediction in obj.sequence_predictions or []:
             self.sequence_prediction_adapter.to_soundevent(sequence_prediction)
diff --git a/src/soundevent/io/aoef/recording.py b/src/soundevent/io/aoef/recording.py
index 59e9021..dbfc61a 100644
--- a/src/soundevent/io/aoef/recording.py
+++ b/src/soundevent/io/aoef/recording.py
@@ -34,9 +34,7 @@ class RecordingObject(BaseModel):
     rights: Optional[str] = None
 
 
-class RecordingAdapter(
-    DataAdapter[data.Recording, RecordingObject, UUID, UUID]
-):
+class RecordingAdapter(DataAdapter[data.Recording, RecordingObject, UUID, UUID]):
     def __init__(
         self,
         user_adapter: UserAdapter,
@@ -59,10 +57,7 @@ def assemble_aoef(
 
         notes = [self._note_adapter.to_aoef(note) for note in obj.notes]
 
-        owners = [
-            self._user_adapter.to_aoef(owner).uuid
-            for owner in obj.owners or []
-        ]
+        owners = [self._user_adapter.to_aoef(owner).uuid for owner in obj.owners or []]
 
         path = obj.path
         if self.audio_dir is not None:
@@ -74,9 +69,7 @@ def assemble_aoef(
             duration=obj.duration,
             channels=obj.channels,
             samplerate=obj.samplerate,
-            time_expansion=(
-                obj.time_expansion if obj.time_expansion != 1.0 else None
-            ),
+            time_expansion=(obj.time_expansion if obj.time_expansion != 1.0 else None),
             hash=obj.hash,
             date=obj.date,
             time=obj.time,
@@ -100,10 +93,7 @@ def assemble_soundevent(self, obj: RecordingObject) -> data.Recording:
             if (tag := self._tag_adapter.from_id(tag_id)) is not None
         ]
 
-        notes = [
-            self._note_adapter.to_soundevent(note)
-            for note in (obj.notes or [])
-        ]
+        notes = [self._note_adapter.to_soundevent(note) for note in (obj.notes or [])]
 
         owners = [
             user
diff --git a/src/soundevent/io/aoef/recording_set.py b/src/soundevent/io/aoef/recording_set.py
index 3e7c95a..3707d6d 100644
--- a/src/soundevent/io/aoef/recording_set.py
+++ b/src/soundevent/io/aoef/recording_set.py
@@ -47,8 +47,7 @@ def to_aoef(
         obj: data.RecordingSet,
     ) -> RecordingSetObject:
         recording_objects = [
-            self.recording_adapter.to_aoef(recording)
-            for recording in obj.recordings
+            self.recording_adapter.to_aoef(recording) for recording in obj.recordings
         ]
         return RecordingSetObject(
             uuid=obj.uuid,
diff --git a/src/soundevent/io/aoef/sequence.py b/src/soundevent/io/aoef/sequence.py
index 2c1e53e..ec624b2 100644
--- a/src/soundevent/io/aoef/sequence.py
+++ b/src/soundevent/io/aoef/sequence.py
@@ -28,9 +28,7 @@ def __init__(
         super().__init__()
         self.soundevent_adapter = soundevent_adapter
 
-    def assemble_aoef(
-        self, obj: data.Sequence, obj_id: UUID
-    ) -> SequenceObject:
+    def assemble_aoef(self, obj: data.Sequence, obj_id: UUID) -> SequenceObject:
         parent = None
         if obj.parent:
             parent = self.to_aoef(obj.parent).uuid
diff --git a/src/soundevent/io/aoef/sequence_prediction.py b/src/soundevent/io/aoef/sequence_prediction.py
index 636f3df..1716d7d 100644
--- a/src/soundevent/io/aoef/sequence_prediction.py
+++ b/src/soundevent/io/aoef/sequence_prediction.py
@@ -42,8 +42,7 @@ def assemble_aoef(
             [
                 (tag.id, predicted_tag.score)
                 for predicted_tag in obj.tags
-                if (tag := self.tag_adapter.to_aoef(predicted_tag.tag))
-                is not None
+                if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None
             ]
             if obj.tags
             else None
diff --git a/src/soundevent/io/aoef/sound_event.py b/src/soundevent/io/aoef/sound_event.py
index 9e5a175..424a20c 100644
--- a/src/soundevent/io/aoef/sound_event.py
+++ b/src/soundevent/io/aoef/sound_event.py
@@ -18,9 +18,7 @@ class SoundEventObject(BaseModel):
     features: Optional[Dict[str, float]] = None
 
 
-class SoundEventAdapter(
-    DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID]
-):
+class SoundEventAdapter(DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID]):
     def __init__(
         self,
         recording_adapter: RecordingAdapter,
diff --git a/src/soundevent/io/aoef/sound_event_annotation.py b/src/soundevent/io/aoef/sound_event_annotation.py
index afa07c3..67733d4 100644
--- a/src/soundevent/io/aoef/sound_event_annotation.py
+++ b/src/soundevent/io/aoef/sound_event_annotation.py
@@ -22,9 +22,7 @@ class SoundEventAnnotationObject(BaseModel):
 
 
 class SoundEventAnnotationAdapter(
-    DataAdapter[
-        data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID
-    ]
+    DataAdapter[data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID]
 ):
     def __init__(
         self,
@@ -68,9 +66,7 @@ def assemble_soundevent(
         sound_event = self.sound_event_adapter.from_id(obj.sound_event)
 
         if sound_event is None:
-            raise ValueError(
-                f"Sound event with ID {obj.sound_event} not found."
-            )
+            raise ValueError(f"Sound event with ID {obj.sound_event} not found.")
 
         return data.SoundEventAnnotation(
             uuid=obj.uuid,
diff --git a/src/soundevent/io/aoef/sound_event_prediction.py b/src/soundevent/io/aoef/sound_event_prediction.py
index 175e06a..007f4ae 100644
--- a/src/soundevent/io/aoef/sound_event_prediction.py
+++ b/src/soundevent/io/aoef/sound_event_prediction.py
@@ -18,9 +18,7 @@ class SoundEventPredictionObject(BaseModel):
 
 
 class SoundEventPredictionAdapter(
-    DataAdapter[
-        data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID
-    ]
+    DataAdapter[data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID]
 ):
     def __init__(
         self,
@@ -44,8 +42,7 @@ def assemble_aoef(
             [
                 (tag.id, predicted_tag.score)
                 for predicted_tag in obj.tags
-                if (tag := self.tag_adapter.to_aoef(predicted_tag.tag))
-                is not None
+                if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None
             ]
             if obj.tags
             else None
@@ -59,9 +56,7 @@ def assemble_soundevent(
         sound_event = self.sound_event_adapter.from_id(obj.sound_event)
 
         if sound_event is None:
-            raise ValueError(
-                f"Sound event with ID {obj.sound_event} not found."
-            )
+            raise ValueError(f"Sound event with ID {obj.sound_event} not found.")
 
         return data.SoundEventPrediction(
             uuid=obj.uuid or uuid4(),
diff --git a/src/soundevent/io/crowsetta/__init__.py b/src/soundevent/io/crowsetta/__init__.py
index a933a8f..63fc6a2 100644
--- a/src/soundevent/io/crowsetta/__init__.py
+++ b/src/soundevent/io/crowsetta/__init__.py
@@ -8,10 +8,7 @@
     annotation_from_clip_annotation,
     annotation_to_clip_annotation,
 )
-from soundevent.io.crowsetta.bbox import (
-    bbox_from_annotation,
-    bbox_to_annotation,
-)
+from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation
 from soundevent.io.crowsetta.labels import (
     label_from_tag,
     label_from_tags,
diff --git a/src/soundevent/io/crowsetta/annotation.py b/src/soundevent/io/crowsetta/annotation.py
index 240c4c3..6a8e91e 100644
--- a/src/soundevent/io/crowsetta/annotation.py
+++ b/src/soundevent/io/crowsetta/annotation.py
@@ -5,10 +5,7 @@
 import crowsetta
 
 from soundevent import data
-from soundevent.io.crowsetta.bbox import (
-    bbox_from_annotation,
-    bbox_to_annotation,
-)
+from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation
 from soundevent.io.crowsetta.sequence import (
     sequence_from_annotations,
     sequence_to_annotations,
@@ -88,8 +85,7 @@ def annotation_from_clip_annotation(
 
     if annotation_fmt != "seq":
         raise ValueError(
-            "annotation_fmt must be either 'bbox' or 'seq', "
-            f"not {annotation_fmt}."
+            "annotation_fmt must be either 'bbox' or 'seq', " f"not {annotation_fmt}."
         )
 
     return crowsetta.Annotation(
@@ -176,8 +172,7 @@ def annotation_to_clip_annotation(
 
     if path is not None and path != recording.path:
         raise ValueError(
-            "The path of the annotation does not match the path of the "
-            "recording."
+            "The path of the annotation does not match the path of the " "recording."
         )
 
     sound_event_annotations = []
@@ -195,9 +190,9 @@ def annotation_to_clip_annotation(
             )
         )
 
-    crowsetta_sequences: Union[
-        List[crowsetta.Sequence], crowsetta.Sequence
-    ] = getattr(annot, "seq", [])
+    crowsetta_sequences: Union[List[crowsetta.Sequence], crowsetta.Sequence] = getattr(
+        annot, "seq", []
+    )
 
     if not isinstance(crowsetta_sequences, list):
         crowsetta_sequences = [crowsetta_sequences]
diff --git a/src/soundevent/io/crowsetta/bbox.py b/src/soundevent/io/crowsetta/bbox.py
index 78e6a98..3854a9c 100644
--- a/src/soundevent/io/crowsetta/bbox.py
+++ b/src/soundevent/io/crowsetta/bbox.py
@@ -23,10 +23,7 @@ def convert_geometry_to_bbox(
             "because the sound event geometry is not a BoundingBox."
         )
 
-    if (
-        geometry.type in ["TimeInterval", "TimeStamp"]
-        and raise_on_time_geometries
-    ):
+    if geometry.type in ["TimeInterval", "TimeStamp"] and raise_on_time_geometries:
         raise ValueError(
             "Cannot convert to a crowsetta bbox because "
             "the sound event geometry is a TimeInterval or TimeStamp "
@@ -162,9 +159,7 @@ def bbox_to_annotation(
         low_freq = low_freq * recording.time_expansion
         high_freq = high_freq * recording.time_expansion
 
-    geometry = data.BoundingBox(
-        coordinates=[start_time, low_freq, end_time, high_freq]
-    )
+    geometry = data.BoundingBox(coordinates=[start_time, low_freq, end_time, high_freq])
 
     tags = label_to_tags(bbox.label, **kwargs)
diff --git a/src/soundevent/io/formats.py b/src/soundevent/io/formats.py
index 8f90b14..a23ae6e 100644
--- a/src/soundevent/io/formats.py
+++ b/src/soundevent/io/formats.py
@@ -36,6 +36,4 @@ def infer_format(path: PathLike) -> str:
         if inferrer(path):
             return format_
 
-    raise ValueError(
-        f"Cannot infer format of file {path}, or format not supported."
-    )
+    raise ValueError(f"Cannot infer format of file {path}, or format not supported.")
diff --git a/src/soundevent/plot/annotation.py b/src/soundevent/plot/annotation.py
index 6a79b9a..fc17e24 100644
--- a/src/soundevent/plot/annotation.py
+++ b/src/soundevent/plot/annotation.py
@@ -75,8 +75,7 @@ def get_tags_position(
 
     if func is None:
         raise NotImplementedError(
-            f"Plotting tags for geometry of type {geometry.type} "
-            "is not implemented."
+            f"Plotting tags for geometry of type {geometry.type} " "is not implemented."
         )
 
     return func(geometry, bounds)
@@ -117,9 +116,7 @@ def _get_tags_position_bounding_box(
 
 _TAG_POSITION_FUNCTIONS: Dict[
     data.GeometryType,
-    Callable[
-        [data.Geometry, Tuple[float, float, float, float]], Tuple[float, float]
-    ],
+    Callable[[data.Geometry, Tuple[float, float, float, float]], Tuple[float, float]],
 ] = {
     data.BoundingBox.geom_type(): _get_tags_position_bounding_box,
 }
diff --git a/src/soundevent/plot/geometries.py b/src/soundevent/plot/geometries.py
index f595ff2..607c0af 100644
--- a/src/soundevent/plot/geometries.py
+++ b/src/soundevent/plot/geometries.py
@@ -58,8 +58,7 @@ def _plot_bounding_box_geometry(
 ) -> Axes:
     if not isinstance(geometry, data.BoundingBox):
         raise ValueError(
-            f"Expected geometry of type {data.BoundingBox}, "
-            f"got {type(geometry)}."
+            f"Expected geometry of type {data.BoundingBox}, " f"got {type(geometry)}."
         )
 
     start_time, low_freq, end_time, high_freq = geometry.coordinates
diff --git a/src/soundevent/plot/tags.py b/src/soundevent/plot/tags.py
index 73cc25b..96581a8 100644
--- a/src/soundevent/plot/tags.py
+++ b/src/soundevent/plot/tags.py
@@ -29,9 +29,7 @@ def __init__(
         self._tags: Dict[data.Tag, str] = {}
 
         colormap = get_cmap(cmap)
-        self._colors = cycle(
-            [colormap(x) for x in np.linspace(0, 1, num_colors)]
-        )
+        self._colors = cycle([colormap(x) for x in np.linspace(0, 1, num_colors)])
 
     def get_color(self, tag: data.Tag) -> str:
         """Get color for tag."""
diff --git a/src/soundevent/types.py b/src/soundevent/types.py
index 9790044..88a3ffe 100644
--- a/src/soundevent/types.py
+++ b/src/soundevent/types.py
@@ -1,9 +1,10 @@
 """Common types and interfaces within bioacoustic analysis."""
 
-from abc import abstractmethod, ABC
-from soundevent import data
+from abc import ABC, abstractmethod
 from typing import List, Optional
 
+from soundevent import data
+
 
 class ClassMapper(ABC):
     """Abstract class for encoding and decoding labels."""
diff --git a/tests/test_array/test_dimensions.py b/tests/test_array/test_dimensions.py
index ab31c39..598d295 100644
--- a/tests/test_array/test_dimensions.py
+++ b/tests/test_array/test_dimensions.py
@@ -183,9 +183,7 @@ def test_create_frequency_dim_from_array_sets_attrs():
 def test_create_frequency_dim_from_array_estimates_step():
     """Test create_frequency_dim_from_array function."""
     arr = np.array([1, 2, 3])
-    frequency_dim = arrays.create_frequency_dim_from_array(
-        arr, estimate_step=True
-    )
+    frequency_dim = arrays.create_frequency_dim_from_array(arr, estimate_step=True)
     assert frequency_dim.attrs["step"] == 1
diff --git a/tests/test_audio/test_io.py b/tests/test_audio/test_io.py
index 2aaaa72..460e3b5 100644
--- a/tests/test_audio/test_io.py
+++ b/tests/test_audio/test_io.py
@@ -27,9 +27,7 @@ def test_audio_to_bytes_has_correct_length(
     dtype: np.dtype,
 ):
     samples = int(duration * samplerate)
-    array = np.random.random(
-        size=[int(duration * samplerate), channels]
-    ).astype(dtype)
+    array = np.random.random(size=[int(duration * samplerate), channels]).astype(dtype)
 
     bytes_per_sample = (bit_depth // 8) * channels
     expected_bytes = samples * bytes_per_sample
diff --git a/tests/test_data/test_datasets.py b/tests/test_data/test_datasets.py
index 1176655..a72938e 100644
--- a/tests/test_data/test_datasets.py
+++ b/tests/test_data/test_datasets.py
@@ -52,9 +52,7 @@ def test_create_dataset_ignores_non_audio_files(tmp_path: Path):
 def test_create_dataset_fails_with_non_existing_directory():
     """Test that we can create a dataset from audio files."""
     with pytest.raises(ValueError):
-        data.Dataset.from_directory(
-            Path("non-existing-directory"), name="test"
-        )
+        data.Dataset.from_directory(Path("non-existing-directory"), name="test")
 
 
 def test_create_dataset_fails_if_path_is_file(tmp_path: Path):
@@ -78,9 +76,7 @@ def test_create_dataset_without_recursive(tmp_path: Path, random_wav):
     """Test that we can create a dataset from audio files."""
     (tmp_path / "test1").mkdir()
     random_wav(path=tmp_path / "test1" / "test1.wav")
-    dataset = data.Dataset.from_directory(
-        tmp_path, recursive=False, name="test"
-    )
+    dataset = data.Dataset.from_directory(tmp_path, recursive=False, name="test")
     assert len(dataset.recordings) == 0
diff --git a/tests/test_data/test_geometry.py b/tests/test_data/test_geometry.py
index 94bbea4..ac0e45e 100644
--- a/tests/test_data/test_geometry.py
+++ b/tests/test_data/test_geometry.py
@@ -184,9 +184,7 @@ def test_load_multilinestring_from_dict():
 def test_load_multilinestring_from_attributes():
     """Test that a MultiLineString can be loaded from attributes."""
-    obj = data.MultiLineString(
-        coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
-    )
+    obj = data.MultiLineString(coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
     geom = data.geometry_validate(obj, mode="attributes")
     assert isinstance(geom, data.MultiLineString)
     assert geom.coordinates == [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
diff --git a/tests/test_evaluation/test_clip_classification.py b/tests/test_evaluation/test_clip_classification.py
index be8a681..48790e4 100644
--- a/tests/test_evaluation/test_clip_classification.py
+++ b/tests/test_evaluation/test_clip_classification.py
@@ -156,9 +156,7 @@ def test_evaluation_has_balanced_accuracy(
         tags=evaluation_tags,
     )
 
-    balanced_accuracy = data.find_feature(
-        evaluation.metrics, name="balanced_accuracy"
-    )
+    balanced_accuracy = data.find_feature(evaluation.metrics, name="balanced_accuracy")
     assert balanced_accuracy is not None
     assert math.isclose(balanced_accuracy.value, 0.5, rel_tol=1e-6)
 
@@ -175,9 +173,7 @@ def test_evaluation_has_top_3_accuracy(
         tags=evaluation_tags,
     )
 
-    top_3_accuracy = data.find_feature(
-        evaluation.metrics, name="top_3_accuracy"
-    )
+    top_3_accuracy = data.find_feature(evaluation.metrics, name="top_3_accuracy")
     assert top_3_accuracy is not None
     assert math.isclose(top_3_accuracy.value, 1.0, rel_tol=1e-6)
 
@@ -244,11 +240,7 @@ def test_each_example_score_is_the_probability_of_the_true_class(
     assert len(evaluation.clip_evaluations[1].metrics) == 1
 
     assert evaluation.clip_evaluations[0].score is not None
-    assert math.isclose(
-        evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6
-    )
+    assert math.isclose(evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6)
 
     assert evaluation.clip_evaluations[1].score is not None
-    assert math.isclose(
-        evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6
-    )
+    assert math.isclose(evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6)
diff --git a/tests/test_evaluation/test_encode.py b/tests/test_evaluation/test_encode.py
index cd2758b..1d7bb50 100644
--- a/tests/test_evaluation/test_encode.py
+++ b/tests/test_evaluation/test_encode.py
@@ -16,9 +16,7 @@
 
 
 @pytest.fixture
-def tags(
-    random_tags: Callable[[int], Sequence[data.Tag]]
-) -> Sequence[data.Tag]:
+def tags(random_tags: Callable[[int], Sequence[data.Tag]]) -> Sequence[data.Tag]:
     """Tags for testing."""
     return random_tags(10)
diff --git a/tests/test_evaluation/test_matching.py b/tests/test_evaluation/test_matching.py
index 0c858da..8ce744d 100644
--- a/tests/test_evaluation/test_matching.py
+++ b/tests/test_evaluation/test_matching.py
@@ -96,9 +96,7 @@ def test_multi_linestring_is_supported():
 
 
 def test_multi_polygon_is_supported():
-    multi_polygon = data.MultiPolygon(
-        coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]]
-    )
+    multi_polygon = data.MultiPolygon(coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]])
     matches = list(match_geometries([multi_polygon], [multi_polygon]))
     assert len(matches) == 1
     source_index, target_index, affinity = matches[0]
diff --git a/tests/test_evaluation/test_sound_event_detection.py b/tests/test_evaluation/test_sound_event_detection.py
index 09080e5..a798832 100644
--- a/tests/test_evaluation/test_sound_event_detection.py
+++ b/tests/test_evaluation/test_sound_event_detection.py
@@ -28,9 +28,7 @@ def test_can_evaluate_nips_data():
     assert isinstance(evaluation, data.Evaluation)
 
     # check that all clips have been evaluated
-    assert len(evaluation.clip_evaluations) == len(
-        evaluation_set.clip_annotations
-    )
+    assert len(evaluation.clip_evaluations) == len(evaluation_set.clip_annotations)
 
     # check that all metrics are present
     assert len(evaluation.metrics) == 4
diff --git a/tests/test_io/test_annotation_projects.py b/tests/test_io/test_annotation_projects.py
index 85a6874..580b487 100644
--- a/tests/test_io/test_annotation_projects.py
+++ b/tests/test_io/test_annotation_projects.py
@@ -40,9 +40,7 @@ def test_saved_annotation_project_is_saved_to_json_file(
     assert path.exists()
 
 
-def test_saved_annotation_project_has_correct_info(
-    monkeypatch, tmp_path: Path
-) -> None:
+def test_saved_annotation_project_has_correct_info(monkeypatch, tmp_path: Path) -> None:
     """Test that the saved annotation project has the correct info."""
     # Arrange
     annotation_project = data.AnnotationProject(
@@ -175,10 +173,7 @@ def test_can_recover_task_status(
 
     # Assert
    assert recovered == annotation_project
-    assert (
-        recovered.tasks[0].status_badges[0].state
-        == data.AnnotationState.completed
-    )
+    assert recovered.tasks[0].status_badges[0].state == data.AnnotationState.completed
 
 
 def test_can_recover_user_that_completed_task(
@@ -285,9 +280,7 @@ def test_can_recover_task_simple_annotation(
         clip_annotations=[
             data.ClipAnnotation(
                 clip=clip,
-                sound_events=[
-                    data.SoundEventAnnotation(sound_event=sound_event)
-                ],
+                sound_events=[data.SoundEventAnnotation(sound_event=sound_event)],
             )
         ],
         tasks=[data.AnnotationTask(clip=clip)],
@@ -301,8 +294,7 @@ def test_can_recover_task_simple_annotation(
     # Assert
     assert recovered == annotation_project
     assert (
-        recovered.clip_annotations[0].sound_events[0].sound_event.geometry
-        is not None
+        recovered.clip_annotations[0].sound_events[0].sound_event.geometry is not None
     )
     assert sound_event.geometry is not None
     assert (
@@ -310,9 +302,7 @@ def test_can_recover_task_simple_annotation(
         == sound_event.geometry.type
     )
     assert (
-        recovered.clip_annotations[0]
-        .sound_events[0]
-        .sound_event.geometry.coordinates
+        recovered.clip_annotations[0].sound_events[0].sound_event.geometry.coordinates
         == sound_event.geometry.coordinates
     )
 
@@ -352,13 +342,8 @@ def test_can_recover_task_annotation_with_tags(
 
     # Assert
     assert recovered == annotation_project
-    assert (
-        recovered.clip_annotations[0].sound_events[0].tags[0].key == "species"
-    )
-    assert (
-        recovered.clip_annotations[0].sound_events[0].tags[0].value
-        == "test_species"
-    )
+    assert recovered.clip_annotations[0].sound_events[0].tags[0].key == "species"
+    assert recovered.clip_annotations[0].sound_events[0].tags[0].value == "test_species"
 
 
 def test_can_recover_annotation_creator(
@@ -409,9 +394,7 @@ def test_can_recover_annotation_creation_date(
             data.ClipAnnotation(
                 clip=clip,
                 sound_events=[
-                    data.SoundEventAnnotation(
-                        sound_event=sound_event, created_on=date
-                    )
+                    data.SoundEventAnnotation(sound_event=sound_event, created_on=date)
                 ],
             ),
         ],
@@ -464,14 +447,8 @@ def test_can_recover_annotation_notes(
 
     # Assert
     assert recovered == annotation_project
-    assert (
-        recovered.clip_annotations[0].sound_events[0].notes[0].message
-        == "test_note"
-    )
-    assert (
-        recovered.clip_annotations[0].sound_events[0].notes[0].created_by
-        == user
-    )
+    assert recovered.clip_annotations[0].sound_events[0].notes[0].message == "test_note"
+    assert recovered.clip_annotations[0].sound_events[0].notes[0].created_by == user
 
 
 def test_can_recover_sound_event_features(
@@ -513,17 +490,11 @@ def test_can_recover_sound_event_features(
     # Assert
     assert recovered == annotation_project
     assert (
-        recovered.clip_annotations[0]
-        .sound_events[0]
-        .sound_event.features[0]
-        .name
+        recovered.clip_annotations[0].sound_events[0].sound_event.features[0].name
         == "duration"
     )
     assert (
-        recovered.clip_annotations[0]
-        .sound_events[0]
-        .sound_event.features[0]
-        .value
+        recovered.clip_annotations[0].sound_events[0].sound_event.features[0].value
         == 1.0
     )
 
@@ -564,9 +535,7 @@ def test_recording_paths_are_stored_as_relative_if_audio_dir_is_provided(
 
 def test_can_parse_nips4plus(tmp_path: Path):
     """Test that NIPS4BPlus annotations can be parsed."""
-    original_path = (
-        BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json"
-    )
+    original_path = BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json"
     path = tmp_path / "test.json"
 
     # Act
diff --git a/tests/test_io/test_aoef/test_api.py b/tests/test_io/test_aoef/test_api.py
index a278e59..b671410 100644
--- a/tests/test_io/test_aoef/test_api.py
+++ b/tests/test_io/test_aoef/test_api.py
@@ -71,9 +71,7 @@ def test_load_fails_if_aoef_version_is_not_supported(tmp_path):
         io.load(path)
 
 
-def test_save_creates_parent_directories(
-    tmp_path: Path, dataset: data.Dataset
-):
+def test_save_creates_parent_directories(tmp_path: Path, dataset: data.Dataset):
     """Test that the save function creates parent directories."""
     # Arrange
     path = tmp_path / "parent" / "child" / "test.json"
diff --git a/tests/test_io/test_crowsetta/test_annotation.py b/tests/test_io/test_crowsetta/test_annotation.py
index 3c90a29..07cc7f0 100644
--- a/tests/test_io/test_crowsetta/test_annotation.py
+++ b/tests/test_io/test_crowsetta/test_annotation.py
@@ -35,9 +35,7 @@ def clip_annotation(recording: data.Recording) -> data.ClipAnnotation:
             data.SoundEventAnnotation(
                 sound_event=data.SoundEvent(
                     recording=recording,
-                    geometry=data.BoundingBox(
-                        coordinates=[0.5, 0.5, 1.5, 1.5]
-                    ),
+                    geometry=data.BoundingBox(coordinates=[0.5, 0.5, 1.5, 1.5]),
                     features=[data.Feature(name="test", value=1.0)],
                 ),
                 tags=[data.Tag(key="animal", value="cat")],
@@ -46,9 +44,7 @@ def clip_annotation(recording: data.Recording) -> data.ClipAnnotation:
             data.SoundEventAnnotation(
                 sound_event=data.SoundEvent(
                     recording=recording,
-                    geometry=data.LineString(
-                        coordinates=[[0.5, 0.5], [1.5, 1.5]]
-                    ),
+                    geometry=data.LineString(coordinates=[[0.5, 0.5], [1.5, 1.5]]),
                     features=[data.Feature(name="test", value=1.0)],
                 ),
                 tags=[data.Tag(key="animal", value="cat")],
diff --git a/tests/test_io/test_crowsetta/test_import.py b/tests/test_io/test_crowsetta/test_import.py
index b9a1b32..16e688c 100644
--- a/tests/test_io/test_crowsetta/test_import.py
+++ b/tests/test_io/test_crowsetta/test_import.py
@@ -52,9 +52,9 @@ def test_can_import_all_example_formats(
         from_file_kwargs = {"audio_path": recording.path}
         to_annot_kwargs = {"samplerate": recording.samplerate}
 
-    annotation = scribe.from_file(
-        example.annot_path, **from_file_kwargs
-    ).to_annot(**to_annot_kwargs)
+    annotation = scribe.from_file(example.annot_path, **from_file_kwargs).to_annot(
+        **to_annot_kwargs
+    )
 
     if isinstance(annotation, list):
         annotation = annotation[0]
@@ -62,9 +62,7 @@ def test_can_import_all_example_formats(
     assert isinstance(annotation, crowsetta.Annotation)
 
     if annotation.notated_path is not None:
-        recording = recording.model_copy(
-            update=dict(path=annotation.notated_path)
-        )
+        recording = recording.model_copy(update=dict(path=annotation.notated_path))
 
     clip_annotation = crowsetta_io.annotation_to_clip_annotation(
         annotation,
diff --git a/tests/test_io/test_crowsetta/test_labels.py b/tests/test_io/test_crowsetta/test_labels.py
index 5316aa4..26b2a15 100644
--- a/tests/test_io/test_crowsetta/test_labels.py
+++ b/tests/test_io/test_crowsetta/test_labels.py
@@ -136,9 +136,7 @@ def test_label_to_tags_with_key_mapping():
 
 def test_label_to_tags_with_key_mapping_fallback():
     key_mapping = {"bat": "animal"}
-    tag = crowsetta_io.label_to_tags(
-        "dog", key_mapping=key_mapping, fallback="pet"
-    )
+    tag = crowsetta_io.label_to_tags("dog", key_mapping=key_mapping, fallback="pet")
     assert tag == [data.Tag(key="pet", value="dog")]
 
 
diff --git a/tests/test_io/test_crowsetta/test_segments.py b/tests/test_io/test_crowsetta/test_segments.py
index 3804d8f..e8bacfe 100644
--- a/tests/test_io/test_crowsetta/test_segments.py
+++ b/tests/test_io/test_crowsetta/test_segments.py
@@ -95,9 +95,7 @@ def test_segment_from_annotation(
 def test_segment_from_annotation_fails_if_not_a_time_interval(
     sound_event_annotation: data.SoundEventAnnotation,
 ):
-    sound_event_annotation.sound_event.geometry = data.Point(
-        coordinates=[0.5, 1]
-    )
+    sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1])
     with pytest.raises(ValueError):
         crowsetta_io.segment_from_annotation(
             sound_event_annotation,
@@ -108,9 +106,7 @@ def test_segment_from_annotation_fails_if_not_a_time_interval(
 def test_segment_from_annotation_casts_to_segment(
     sound_event_annotation: data.SoundEventAnnotation,
 ):
-    sound_event_annotation.sound_event.geometry = data.Point(
-        coordinates=[0.5, 1]
-    )
+    sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1])
     segment = crowsetta_io.segment_from_annotation(
         sound_event_annotation,
         cast_to_segment=True,
diff --git a/tests/test_io/test_crowsetta/test_sequence.py b/tests/test_io/test_crowsetta/test_sequence.py
index 6c4897d..d02e5f6 100644
--- a/tests/test_io/test_crowsetta/test_sequence.py
+++ b/tests/test_io/test_crowsetta/test_sequence.py
@@ -170,7 +170,5 @@ def test_sequence_to_annotations(
         recording,
     )
     assert len(annotations) == 2
-    assert all(
-        isinstance(ann, data.SoundEventAnnotation) for ann in annotations
-    )
+    assert all(isinstance(ann, data.SoundEventAnnotation) for ann in annotations)
     assert all(ann.sound_event.recording == recording for ann in annotations)
diff --git a/tests/test_io/test_model_runs.py b/tests/test_io/test_model_runs.py
index d9716fd..8b33b49 100644
--- a/tests/test_io/test_model_runs.py
+++ b/tests/test_io/test_model_runs.py
@@ -138,9 +138,7 @@ def test_can_recover_processed_clip_tags(
     # Assert
     assert model_run == recovered
     assert recovered.clip_predictions[0].tags[0].tag.key == "species"
-    assert (
-        recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus"
-    )
+    assert recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus"
     assert recovered.clip_predictions[0].tags[0].score == 0.9
 
 
@@ -211,10 +209,7 @@ def test_can_recover_simple_predicted_sound_event(
 
     # Assert
     assert recovered.clip_predictions[0].sound_events[0].score == 0.9
-    assert (
-        recovered.clip_predictions[0].sound_events[0].sound_event
-        == sound_event
-    )
+    assert recovered.clip_predictions[0].sound_events[0].sound_event == sound_event
 
     assert model_run == recovered
 
@@ -254,10 +249,7 @@ def test_can_recover_predicted_sound_event_with_predicted_tags(
     recovered = io.load(path, type="model_run")
 
     # Assert
-    assert (
-        recovered.clip_predictions[0].sound_events[0].tags[0].tag.key
-        == "species"
-    )
+    assert recovered.clip_predictions[0].sound_events[0].tags[0].tag.key == "species"
     assert (
         recovered.clip_predictions[0].sound_events[0].tags[0].tag.value
         == "Myotis lucifugus"