From f113cf3cfc32161e739bee5089fe7307b606bd96 Mon Sep 17 00:00:00 2001
From: Jay Qi
Date: Sat, 27 Jan 2024 22:11:27 -0500
Subject: [PATCH] Update for black 24.1.0

---
 requirements-dev/lint.txt                   |  2 +-
 zamba/models/densepose/densepose_manager.py | 16 ++++++++------
 zamba/models/model_manager.py               | 24 +++++++++++++--------
 zamba/models/slowfast_models.py             |  6 +++---
 zamba/pytorch/dataloaders.py                | 16 ++++++++------
 zamba/pytorch/transforms.py                 |  8 ++++---
 6 files changed, 44 insertions(+), 28 deletions(-)

diff --git a/requirements-dev/lint.txt b/requirements-dev/lint.txt
index 4e92b9de..9c67f563 100644
--- a/requirements-dev/lint.txt
+++ b/requirements-dev/lint.txt
@@ -1,2 +1,2 @@
-black
+black>=24.1.0
 flake8
diff --git a/zamba/models/densepose/densepose_manager.py b/zamba/models/densepose/densepose_manager.py
index cf9aaf90..e111bdb9 100644
--- a/zamba/models/densepose/densepose_manager.py
+++ b/zamba/models/densepose/densepose_manager.py
@@ -253,12 +253,16 @@ def serialize_image_output(self, instances, filename=None, write_embeddings=Fals
                     "value": labels[i],
                     "mesh_name": self.vis_class_to_mesh_name[labels[i]],
                 },
-                "embedding": pose_result.embedding[[i], ...].cpu().tolist()
-                if write_embeddings
-                else None,
-                "segmentation": pose_result.coarse_segm[[i], ...].cpu().tolist()
-                if write_embeddings
-                else None,
+                "embedding": (
+                    pose_result.embedding[[i], ...].cpu().tolist()
+                    if write_embeddings
+                    else None
+                ),
+                "segmentation": (
+                    pose_result.coarse_segm[[i], ...].cpu().tolist()
+                    if write_embeddings
+                    else None
+                ),
             }
             for i in range(len(instances))
         ]
diff --git a/zamba/models/model_manager.py b/zamba/models/model_manager.py
index 7bf79d48..a80c8014 100644
--- a/zamba/models/model_manager.py
+++ b/zamba/models/model_manager.py
@@ -258,12 +258,16 @@ def train_model(
     model_checkpoint = ModelCheckpoint(
         dirpath=logging_and_save_dir,
         filename=train_config.model_name,
-        monitor=train_config.early_stopping_config.monitor
-        if train_config.early_stopping_config is not None
-        else None,
-        mode=train_config.early_stopping_config.mode
-        if train_config.early_stopping_config is not None
-        else "min",
+        monitor=(
+            train_config.early_stopping_config.monitor
+            if train_config.early_stopping_config is not None
+            else None
+        ),
+        mode=(
+            train_config.early_stopping_config.mode
+            if train_config.early_stopping_config is not None
+            else "min"
+        ),
     )
 
     callbacks = [model_checkpoint]
@@ -283,9 +287,11 @@ def train_model(
         logger=tensorboard_logger,
         callbacks=callbacks,
         fast_dev_run=train_config.dry_run,
-        strategy=DDPStrategy(find_unused_parameters=False)
-        if (data_module.multiprocessing_context is not None) and (train_config.gpus > 1)
-        else "auto",
+        strategy=(
+            DDPStrategy(find_unused_parameters=False)
+            if (data_module.multiprocessing_context is not None) and (train_config.gpus > 1)
+            else "auto"
+        ),
     )
 
     if video_loader_config.cache_dir is None:
diff --git a/zamba/models/slowfast_models.py b/zamba/models/slowfast_models.py
index 58e12501..343582db 100644
--- a/zamba/models/slowfast_models.py
+++ b/zamba/models/slowfast_models.py
@@ -74,9 +74,9 @@ def __init__(
             ),
             activation=None,
             pool=None,
-            dropout=None
-            if post_backbone_dropout is None
-            else torch.nn.Dropout(post_backbone_dropout),
+            dropout=(
+                None if post_backbone_dropout is None else torch.nn.Dropout(post_backbone_dropout)
+            ),
             output_pool=torch.nn.AdaptiveAvgPool3d(1),
         )
 
diff --git a/zamba/pytorch/dataloaders.py b/zamba/pytorch/dataloaders.py
index ed9008bd..d6e7cbbb 100644
--- a/zamba/pytorch/dataloaders.py
+++ b/zamba/pytorch/dataloaders.py
@@ -121,12 +121,16 @@ def __getitem__(self, index: int):
             video = np.zeros(
                 (
                     self.video_loader_config.total_frames,
-                    self.video_loader_config.model_input_height
-                    if self.video_loader_config.model_input_height is not None
-                    else self.video_loader_config.frame_selection_height,
-                    self.video_loader_config.model_input_width
-                    if self.video_loader_config.model_input_width is not None
-                    else self.video_loader_config.frame_selection_width,
+                    (
+                        self.video_loader_config.model_input_height
+                        if self.video_loader_config.model_input_height is not None
+                        else self.video_loader_config.frame_selection_height
+                    ),
+                    (
+                        self.video_loader_config.model_input_width
+                        if self.video_loader_config.model_input_width is not None
+                        else self.video_loader_config.frame_selection_width
+                    ),
                     3,
                 ),
                 dtype="int",
diff --git a/zamba/pytorch/transforms.py b/zamba/pytorch/transforms.py
index d635527a..0627f774 100644
--- a/zamba/pytorch/transforms.py
+++ b/zamba/pytorch/transforms.py
@@ -76,9 +76,11 @@ def compute_left_and_right_pad(original_size: int, padded_size: int) -> Tuple[in
     def forward(self, vid: torch.Tensor) -> torch.Tensor:
         padding = tuple(
             itertools.chain.from_iterable(
-                (0, 0)
-                if padded_size is None
-                else self.compute_left_and_right_pad(original_size, padded_size)
+                (
+                    (0, 0)
+                    if padded_size is None
+                    else self.compute_left_and_right_pad(original_size, padded_size)
+                )
                 for original_size, padded_size in zip(vid.shape, self.dimension_sizes)
             )
         )