From bd8f473da99d787e8bcf17c6a2e42460ac0df4f5 Mon Sep 17 00:00:00 2001 From: Yanghan Wang Date: Thu, 2 May 2024 16:57:29 -0700 Subject: [PATCH] reapply linter & upgrade black to 24.3.0 Summary: Pull Request resolved: https://github.com/facebookresearch/detectron2/pull/5275 Differential Revision: D56913399 --- .github/workflows/workflow.yml | 4 +- configs/Misc/torchvision_imagenet_R_50.py | 1 - configs/common/train.py | 2 +- demo/demo.py | 27 +++---- detectron2/checkpoint/c2_model_loading.py | 1 - detectron2/data/build.py | 47 +++++++----- detectron2/data/datasets/cityscapes.py | 31 +++----- detectron2/data/datasets/coco.py | 75 ++++++------------- detectron2/data/datasets/lvis.py | 46 ++++-------- detectron2/data/detection_utils.py | 8 +- .../data/samplers/distributed_sampler.py | 6 +- detectron2/engine/defaults.py | 22 +++--- detectron2/export/shared.py | 1 + detectron2/modeling/backbone/regnet.py | 6 +- detectron2/modeling/backbone/resnet.py | 2 +- detectron2/modeling/matcher.py | 2 +- detectron2/modeling/roi_heads/roi_heads.py | 2 +- detectron2/utils/collect_env.py | 19 ++--- detectron2/utils/events.py | 36 +++++---- detectron2/utils/visualizer.py | 30 ++++---- dev/linter.sh | 4 +- projects/DeepLab/train_net.py | 19 +---- projects/DensePose/densepose/data/build.py | 10 +-- .../densepose/modeling/losses/embed_utils.py | 6 +- .../densepose/modeling/losses/utils.py | 6 +- .../densepose/modeling/roi_heads/deeplab.py | 2 +- projects/DensePose/train_net.py | 24 ++---- projects/Panoptic-DeepLab/train_net.py | 25 ++----- projects/PointRend/train_net.py | 15 +--- projects/PointSup/train_net.py | 15 +--- projects/TensorMask/train_net.py | 7 +- projects/TridentNet/train_net.py | 7 +- tools/analyze_model.py | 17 ++--- tools/benchmark.py | 25 ++----- tools/deploy/export_model.py | 42 ++++------- tools/lazyconfig_train_net.py | 30 ++++---- tools/lightning_train_net.py | 20 ++--- tools/plain_train_net.py | 38 +++------- tools/train_net.py | 12 +-- tools/visualize_data.py | 17 +---- tools/visualize_json_results.py | 17 ++--- 41 files changed, 267 insertions(+), 459 deletions(-) diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index 3de246c9a0..791102d02c 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -18,8 +18,8 @@ jobs: # flake8-bugbear flake8-comprehensions are useful but not available internally run: | python -m pip install --upgrade pip - python -m pip install flake8==3.8.1 isort==4.3.21 - python -m pip install black==22.3.0 + python -m pip install flake8==3.12.3 isort==4.3.21 + python -m pip install black==24.3.0 flake8 --version - name: Lint run: | diff --git a/configs/Misc/torchvision_imagenet_R_50.py b/configs/Misc/torchvision_imagenet_R_50.py index 0d75305bcf..20c1f85d66 100644 --- a/configs/Misc/torchvision_imagenet_R_50.py +++ b/configs/Misc/torchvision_imagenet_R_50.py @@ -10,7 +10,6 @@ """ - import torch from torch import nn from torch.nn import functional as F diff --git a/configs/common/train.py b/configs/common/train.py index b6ed02bd59..d2fd9c2f87 100644 --- a/configs/common/train.py +++ b/configs/common/train.py @@ -13,6 +13,6 @@ checkpointer=dict(period=5000, max_to_keep=100), # options for PeriodicCheckpointer eval_period=5000, log_period=20, - device="cuda" + device="cuda", # ... 
) diff --git a/demo/demo.py b/demo/demo.py index 6bd5bd295a..d01f93501e 100755 --- a/demo/demo.py +++ b/demo/demo.py @@ -2,13 +2,12 @@ import argparse import glob import multiprocessing as mp +import numpy as np import os import tempfile import time import warnings - import cv2 -import numpy as np import tqdm from detectron2.config import get_cfg @@ -32,9 +31,7 @@ def setup_cfg(args): # Set score_threshold for builtin models cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold - cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = ( - args.confidence_threshold - ) + cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold cfg.freeze() return cfg @@ -47,9 +44,7 @@ def get_parser(): metavar="FILE", help="path to config file", ) - parser.add_argument( - "--webcam", action="store_true", help="Take inputs from webcam." - ) + parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.") parser.add_argument("--video-input", help="Path to video file.") parser.add_argument( "--input", @@ -118,9 +113,11 @@ def main() -> None: logger.info( "{}: {} in {:.2f}s".format( path, - "detected {} instances".format(len(predictions["instances"])) - if "instances" in predictions - else "finished", + ( + "detected {} instances".format(len(predictions["instances"])) + if "instances" in predictions + else "finished" + ), time.time() - start_time, ) ) @@ -130,9 +127,7 @@ def main() -> None: assert os.path.isdir(args.output), args.output out_filename = os.path.join(args.output, os.path.basename(path)) else: - assert ( - len(args.input) == 1 - ), "Please specify a directory with args.output" + assert len(args.input) == 1, "Please specify a directory with args.output" out_filename = args.output visualized_output.save(out_filename) else: @@ -159,9 +154,7 @@ def main() -> None: num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) basename = os.path.basename(args.video_input) codec, file_ext = ( - ("x264", ".mkv") - if test_opencv_video_format("x264", ".mkv") - else ("mp4v", ".mp4") + ("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4") ) if codec == ".mp4v": warnings.warn("x264 codec not available, switching to mp4v") diff --git a/detectron2/checkpoint/c2_model_loading.py b/detectron2/checkpoint/c2_model_loading.py index 0cf8b77a93..551753b15a 100644 --- a/detectron2/checkpoint/c2_model_loading.py +++ b/detectron2/checkpoint/c2_model_loading.py @@ -4,7 +4,6 @@ import re from typing import Dict, List import torch -from tabulate import tabulate def convert_basic_c2_names(original_keys): diff --git a/detectron2/data/build.py b/detectron2/data/build.py index 6529518f11..7287791008 100644 --- a/detectron2/data/build.py +++ b/detectron2/data/build.py @@ -364,7 +364,7 @@ def build_batch_data_loader( collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, generator=generator, - **kwargs + **kwargs, ) # yield individual mapped dict data_loader = AspectRatioGroupedDataset(data_loader, batch_size) if collate_fn is None: @@ -408,12 +408,14 @@ def _build_weighted_sampler(cfg, enable_category_balance=False): name: get_detection_dataset_dicts( [name], filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON - else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN - if cfg.MODEL.LOAD_PROPOSALS - else None, + 
min_keypoints=( + cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE + if cfg.MODEL.KEYPOINT_ON + else 0 + ), + proposal_files=( + cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None + ), ) for name in cfg.DATASETS.TRAIN } @@ -466,9 +468,9 @@ def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON - else 0, + min_keypoints=( + cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0 + ), proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) @@ -522,7 +524,7 @@ def build_detection_train_loader( aspect_ratio_grouping=True, num_workers=0, collate_fn=None, - **kwargs + **kwargs, ): """ Build a dataloader for object detection with some default features. @@ -574,7 +576,7 @@ def build_detection_train_loader( aspect_ratio_grouping=aspect_ratio_grouping, num_workers=num_workers, collate_fn=collate_fn, - **kwargs + **kwargs, ) @@ -589,11 +591,14 @@ def _test_loader_from_config(cfg, dataset_name, mapper=None): dataset = get_detection_dataset_dicts( dataset_name, filter_empty=False, - proposal_files=[ - cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name - ] - if cfg.MODEL.LOAD_PROPOSALS - else None, + proposal_files=( + [ + cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] + for x in dataset_name + ] + if cfg.MODEL.LOAD_PROPOSALS + else None + ), ) if mapper is None: mapper = DatasetMapper(cfg, False) @@ -601,9 +606,11 @@ def _test_loader_from_config(cfg, dataset_name, mapper=None): "dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS, - "sampler": InferenceSampler(len(dataset)) - if not isinstance(dataset, torchdata.IterableDataset) - else None, + "sampler": ( + InferenceSampler(len(dataset)) + if not isinstance(dataset, torchdata.IterableDataset) + else None + ), } diff --git a/detectron2/data/datasets/cityscapes.py b/detectron2/data/datasets/cityscapes.py index 8a82256725..3a0d717afb 100644 --- a/detectron2/data/datasets/cityscapes.py +++ b/detectron2/data/datasets/cityscapes.py @@ -3,17 +3,16 @@ import json import logging import multiprocessing as mp +import numpy as np import os from itertools import chain - -import numpy as np import pycocotools.mask as mask_util +from PIL import Image from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger -from PIL import Image try: import cv2 # noqa @@ -40,9 +39,7 @@ def _get_cityscapes_files(image_dir, gt_dir): assert basename.endswith(suffix), basename basename = basename[: -len(suffix)] - instance_file = os.path.join( - city_gt_dir, basename + "gtFine_instanceIds.png" - ) + instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png") label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png") json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json") @@ -79,9 +76,7 @@ def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=Tru pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4)) ret = pool.map( - functools.partial( - _cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons - ), + 
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons), files, ) logger.info("Loaded {} images from {}".format(len(ret), image_dir)) @@ -110,9 +105,7 @@ def load_cityscapes_semantic(image_dir, gt_dir): ret = [] # gt_dir is small and contain many small files. make sense to fetch to local first gt_dir = PathManager.get_local_path(gt_dir) - for image_file, _, label_file, json_file in _get_cityscapes_files( - image_dir, gt_dir - ): + for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir): label_file = label_file.replace("labelIds", "labelTrainIds") with PathManager.open(json_file, "r") as f: @@ -216,9 +209,7 @@ def _cityscapes_files_to_dict(files, from_json, to_polygons): elif isinstance(poly_wo_overlaps, MultiPolygon): poly_list = poly_wo_overlaps.geoms else: - raise NotImplementedError( - "Unknown geometric structure {}".format(poly_wo_overlaps) - ) + raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps)) poly_coord = [] for poly_el in poly_list: @@ -272,9 +263,9 @@ def _cityscapes_files_to_dict(files, from_json, to_polygons): if to_polygons: # This conversion comes from D4809743 and D5171122, # when Mask-RCNN was first developed. - contours = cv2.findContours( - mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE - )[-2] + contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[ + -2 + ] polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3] # opencv's can produce invalid polygons if len(polygons) == 0: @@ -318,9 +309,7 @@ def main() -> None: ) logger.info("Done loading {} samples.".format(len(dicts))) - thing_classes = [ - k.name for k in labels if k.hasInstances and not k.ignoreInEval - ] + thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval] meta = Metadata().set(thing_classes=thing_classes) else: diff --git a/detectron2/data/datasets/coco.py b/detectron2/data/datasets/coco.py index 5b88f7da36..ec600e5160 100644 --- a/detectron2/data/datasets/coco.py +++ b/detectron2/data/datasets/coco.py @@ -4,18 +4,17 @@ import io import json import logging +import numpy as np import os import shutil - -import numpy as np import pycocotools.mask as mask_util - -from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes -from detectron2.utils.file_io import PathManager from fvcore.common.timer import Timer from iopath.common.file_io import file_lock from PIL import Image +from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes +from detectron2.utils.file_io import PathManager + from .. import DatasetCatalog, MetadataCatalog """ @@ -33,9 +32,7 @@ ] -def load_coco_json( - json_file, image_root, dataset_name=None, extra_annotation_keys=None -): +def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None): """ Load a json file with COCO's instances annotation format. Currently supports instance detection, instance segmentation, @@ -76,9 +73,7 @@ def load_coco_json( with contextlib.redirect_stdout(io.StringIO()): coco_api = COCO(json_file) if timer.seconds() > 1: - logger.info( - "Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()) - ) + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) id_map = None if dataset_name is not None: @@ -147,24 +142,20 @@ def load_coco_json( # However the ratio of buggy annotations there is tiny and does not affect accuracy. # Therefore we explicitly white-list them. 
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len( - ann_ids - ), "Annotation ids in '{}' are not unique!".format(json_file) + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( + json_file + ) imgs_anns = list(zip(imgs, anns)) - logger.info( - "Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file) - ) + logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) dataset_dicts = [] - ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + ( - extra_annotation_keys or [] - ) + ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) num_instances_without_valid_segmentation = 0 - for (img_dict, anno_dict_list) in imgs_anns: + for img_dict, anno_dict_list in imgs_anns: record = {} record["file_name"] = os.path.join(image_root, img_dict["file_name"]) record["height"] = img_dict["height"] @@ -182,9 +173,7 @@ def load_coco_json( # can trigger this assertion. assert anno["image_id"] == image_id - assert ( - anno.get("ignore", 0) == 0 - ), '"ignore" in COCO json file is not supported.' + assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' obj = {key: anno[key] for key in ann_keys if key in anno} if "bbox" in obj and len(obj["bbox"]) == 0: @@ -201,9 +190,7 @@ def load_coco_json( segm = mask_util.frPyObjects(segm, *segm["size"]) else: # filter out invalid polygons (< 3 points) - segm = [ - poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6 - ] + segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] if len(segm) == 0: num_instances_without_valid_segmentation += 1 continue # ignore this instance @@ -281,19 +268,11 @@ def file2id(folder_path, file_path): return image_id input_files = sorted( - ( - os.path.join(image_root, f) - for f in PathManager.ls(image_root) - if f.endswith(image_ext) - ), + (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), key=lambda file_path: file2id(image_root, file_path), ) gt_files = sorted( - ( - os.path.join(gt_root, f) - for f in PathManager.ls(gt_root) - if f.endswith(gt_ext) - ), + (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=lambda file_path: file2id(gt_root, file_path), ) @@ -316,13 +295,11 @@ def file2id(folder_path, file_path): gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] logger.info( - "Loaded {} images with semantic segmentation from {}".format( - len(input_files), image_root - ) + "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) ) dataset_dicts = [] - for (img_path, gt_path) in zip(input_files, gt_files): + for img_path, gt_path in zip(input_files, gt_files): record = {} record["file_name"] = img_path record["sem_seg_file_name"] = gt_path @@ -356,9 +333,7 @@ def convert_to_coco_dict(dataset_name): # unmap the category mapping ids for COCO if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): - reverse_id_mapping = { - v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items() - } + reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa else: reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa @@ -390,9 +365,7 @@ def convert_to_coco_dict(dataset_name): bbox = annotation["bbox"] if isinstance(bbox, np.ndarray): if bbox.ndim != 1: - raise 
ValueError( - f"bbox has to be 1-dimensional. Got shape={bbox.shape}." - ) + raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") bbox = bbox.tolist() if len(bbox) not in [4, 5]: raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.") @@ -442,9 +415,7 @@ def convert_to_coco_dict(dataset_name): coco_annotation["bbox"] = [round(float(x), 3) for x in bbox] coco_annotation["area"] = float(area) coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0)) - coco_annotation["category_id"] = int( - reverse_id_mapper(annotation["category_id"]) - ) + coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"])) # Add optional fields if "keypoints" in annotation: @@ -505,9 +476,7 @@ def convert_to_coco_json(dataset_name, output_file, allow_cached=True): "You need to clear the cache file if your dataset has been modified." ) else: - logger.info( - f"Converting annotations of dataset '{dataset_name}' to COCO format ...)" - ) + logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)") coco_dict = convert_to_coco_dict(dataset_name) logger.info(f"Caching COCO format annotations at '{output_file}' ...") diff --git a/detectron2/data/datasets/lvis.py b/detectron2/data/datasets/lvis.py index 1a28463428..9eda86cec4 100644 --- a/detectron2/data/datasets/lvis.py +++ b/detectron2/data/datasets/lvis.py @@ -1,18 +1,16 @@ # Copyright (c) Facebook, Inc. and its affiliates. import logging import os +from fvcore.common.timer import Timer from detectron2.data import DatasetCatalog, MetadataCatalog from detectron2.structures import BoxMode from detectron2.utils.file_io import PathManager -from fvcore.common.timer import Timer from .builtin_meta import _get_coco_instances_meta from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES -from .lvis_v1_category_image_count import ( - LVIS_CATEGORY_IMAGE_COUNT as LVIS_V1_CATEGORY_IMAGE_COUNT, -) +from .lvis_v1_category_image_count import LVIS_CATEGORY_IMAGE_COUNT as LVIS_V1_CATEGORY_IMAGE_COUNT """ This file contains functions to parse LVIS-format annotations into dicts in the @@ -40,9 +38,7 @@ def register_lvis_instances(name, metadata, json_file, image_root): ) -def load_lvis_json( - json_file, image_root, dataset_name=None, extra_annotation_keys=None -): +def load_lvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None): """ Load a json file in LVIS's annotation format. 
@@ -71,9 +67,7 @@ def load_lvis_json( timer = Timer() lvis_api = LVIS(json_file) if timer.seconds() > 1: - logger.info( - "Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()) - ) + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) if dataset_name is not None: meta = get_lvis_instances_meta(dataset_name) @@ -108,21 +102,17 @@ def load_lvis_json( # Sanity check that each annotation has a unique id ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len( - ann_ids - ), "Annotation ids in '{}' are not unique".format(json_file) + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format( + json_file + ) imgs_anns = list(zip(imgs, anns)) - logger.info( - "Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file) - ) + logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)) if extra_annotation_keys: logger.info( - "The following extra annotation keys will be loaded: {} ".format( - extra_annotation_keys - ) + "The following extra annotation keys will be loaded: {} ".format(extra_annotation_keys) ) else: extra_annotation_keys = [] @@ -136,14 +126,12 @@ def get_file_name(img_root, img_dict): dataset_dicts = [] - for (img_dict, anno_dict_list) in imgs_anns: + for img_dict, anno_dict_list in imgs_anns: record = {} record["file_name"] = get_file_name(image_root, img_dict) record["height"] = img_dict["height"] record["width"] = img_dict["width"] - record["not_exhaustive_category_ids"] = img_dict.get( - "not_exhaustive_category_ids", [] - ) + record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", []) record["neg_category_ids"] = img_dict.get("neg_category_ids", []) image_id = record["image_id"] = img_dict["id"] @@ -157,18 +145,12 @@ def get_file_name(img_root, img_dict): # LVIS data loader can be used to load COCO dataset categories. In this case `meta` # variable will have a field with COCO-specific category mapping. 
if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta: - obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ - anno["category_id"] - ] + obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][anno["category_id"]] else: - obj["category_id"] = ( - anno["category_id"] - 1 - ) # Convert 1-indexed to 0-indexed + obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed segm = anno["segmentation"] # list[list[float]] # filter out invalid polygons (< 3 points) - valid_segm = [ - poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6 - ] + valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] assert len(segm) == len( valid_segm ), "Annotation contains an invalid polygon with < 3 points" diff --git a/detectron2/data/detection_utils.py b/detectron2/data/detection_utils.py index ada19bdb4a..8d6173e568 100644 --- a/detectron2/data/detection_utils.py +++ b/detectron2/data/detection_utils.py @@ -195,9 +195,11 @@ def check_image_size(dataset_dict, image): if not image_wh == expected_wh: raise SizeMismatchError( "Mismatched image shape{}, got {}, expect {}.".format( - " for image " + dataset_dict["file_name"] - if "file_name" in dataset_dict - else "", + ( + " for image " + dataset_dict["file_name"] + if "file_name" in dataset_dict + else "" + ), image_wh, expected_wh, ) diff --git a/detectron2/data/samplers/distributed_sampler.py b/detectron2/data/samplers/distributed_sampler.py index 5155c86567..cd3b44a42d 100644 --- a/detectron2/data/samplers/distributed_sampler.py +++ b/detectron2/data/samplers/distributed_sampler.py @@ -191,11 +191,7 @@ def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh, sqrt=Tr category_rep = { cat_id: max( 1.0, - ( - math.sqrt(repeat_thresh / cat_freq) - if sqrt - else (repeat_thresh / cat_freq) - ), + (math.sqrt(repeat_thresh / cat_freq) if sqrt else (repeat_thresh / cat_freq)), ) for cat_id, cat_freq in category_freq.items() } diff --git a/detectron2/engine/defaults.py b/detectron2/engine/defaults.py index ff5625ae86..c649bf8ff7 100644 --- a/detectron2/engine/defaults.py +++ b/detectron2/engine/defaults.py @@ -432,16 +432,18 @@ def build_hooks(self): ret = [ hooks.IterationTimer(), hooks.LRScheduler(), - hooks.PreciseBN( - # Run at the same freq as (but before) evaluation. - cfg.TEST.EVAL_PERIOD, - self.model, - # Build a new data loader to not affect training - self.build_train_loader(cfg), - cfg.TEST.PRECISE_BN.NUM_ITER, - ) - if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) - else None, + ( + hooks.PreciseBN( + # Run at the same freq as (but before) evaluation. + cfg.TEST.EVAL_PERIOD, + self.model, + # Build a new data loader to not affect training + self.build_train_loader(cfg), + cfg.TEST.PRECISE_BN.NUM_ITER, + ) + if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) + else None + ), ] # Do PreciseBN before checkpointer, because it updates the model and need to diff --git a/detectron2/export/shared.py b/detectron2/export/shared.py index fe5b790fa3..99920d4bf4 100644 --- a/detectron2/export/shared.py +++ b/detectron2/export/shared.py @@ -351,6 +351,7 @@ def get_params_from_init_net( params: dict from blob name to numpy array device_options: dict from blob name to the device option of its creating op """ + # NOTE: this assumes that the params is determined by producer op with the # only exception be CopyGPUToCPU which is CUDA op but returns CPU tensor. 
def _get_device_option(producer_op): diff --git a/detectron2/modeling/backbone/regnet.py b/detectron2/modeling/backbone/regnet.py index 3533d63385..56d7baf442 100644 --- a/detectron2/modeling/backbone/regnet.py +++ b/detectron2/modeling/backbone/regnet.py @@ -443,9 +443,9 @@ def default_activation_class(): group_widths=gs, bottleneck_ratios=bs, se_ratio=se_ratio, - activation_class=default_activation_class - if activation_class is None - else activation_class, + activation_class=( + default_activation_class if activation_class is None else activation_class + ), freeze_at=freeze_at, norm=norm, out_features=out_features, diff --git a/detectron2/modeling/backbone/resnet.py b/detectron2/modeling/backbone/resnet.py index 5b8e842c58..7a2263d5e2 100644 --- a/detectron2/modeling/backbone/resnet.py +++ b/detectron2/modeling/backbone/resnet.py @@ -581,7 +581,7 @@ def make_default_stages(depth, block_class=None, **kwargs): in_channels = [64, 256, 512, 1024] out_channels = [256, 512, 1024, 2048] ret = [] - for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels): + for n, s, i, o in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels): if depth >= 50: kwargs["bottleneck_channels"] = o // 4 ret.append( diff --git a/detectron2/modeling/matcher.py b/detectron2/modeling/matcher.py index be8abc770e..49660fb96a 100644 --- a/detectron2/modeling/matcher.py +++ b/detectron2/modeling/matcher.py @@ -94,7 +94,7 @@ def __call__(self, match_quality_matrix): match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) - for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): + for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l diff --git a/detectron2/modeling/roi_heads/roi_heads.py b/detectron2/modeling/roi_heads/roi_heads.py index 13dd57a047..2f4546cd0c 100644 --- a/detectron2/modeling/roi_heads/roi_heads.py +++ b/detectron2/modeling/roi_heads/roi_heads.py @@ -283,7 +283,7 @@ def label_and_sample_proposals( # like masks, keypoints, etc, will filter the proposals again, # (by foreground/background, or number of keypoints in the image, etc) # so we essentially index the data twice. - for (trg_name, trg_value) in targets_per_image.get_fields().items(): + for trg_name, trg_value in targets_per_image.get_fields().items(): if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name): proposals_per_image.set(trg_name, trg_value[sampled_targets]) # If no GT is given in the image, we don't know what a dummy gt value can be. diff --git a/detectron2/utils/collect_env.py b/detectron2/utils/collect_env.py index 27b38bc3bc..a4c0b1f918 100644 --- a/detectron2/utils/collect_env.py +++ b/detectron2/utils/collect_env.py @@ -1,12 +1,11 @@ # Copyright (c) Facebook, Inc. and its affiliates. 
import importlib +import numpy as np import os import re import subprocess import sys from collections import defaultdict - -import numpy as np import PIL import torch import torchvision @@ -136,9 +135,7 @@ def collect_env_info(): data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) data.append(("PyTorch debug build", torch.version.debug)) try: - data.append( - ("torch._C._GLIBCXX_USE_CXX11_ABI", torch._C._GLIBCXX_USE_CXX11_ABI) - ) + data.append(("torch._C._GLIBCXX_USE_CXX11_ABI", torch._C._GLIBCXX_USE_CXX11_ABI)) except Exception: pass @@ -181,9 +178,7 @@ def collect_env_info(): data.append( ( "torchvision", - str(torchvision.__version__) - + " @" - + os.path.dirname(torchvision.__file__), + str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), ) ) if has_cuda: @@ -228,18 +223,14 @@ def test_nccl_ops(): dist_url = "file:///tmp/nccl_tmp_file" print("Testing NCCL connectivity ... this should not hang.") - mp.spawn( - _test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False - ) + mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False) print("NCCL succeeded.") def _test_nccl_worker(rank, num_gpu, dist_url): import torch.distributed as dist - dist.init_process_group( - backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu - ) + dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu) dist.barrier(device_ids=[rank]) diff --git a/detectron2/utils/events.py b/detectron2/utils/events.py index 7d582a9a16..c4f9dadfd8 100644 --- a/detectron2/utils/events.py +++ b/detectron2/utils/events.py @@ -296,18 +296,22 @@ def write(self): if "[metric]" in k ] ), - avg_time="time: {:.4f} ".format(avg_iter_time) - if avg_iter_time is not None - else "", - last_time="last_time: {:.4f} ".format(last_iter_time) - if last_iter_time is not None - else "", - avg_data_time="data_time: {:.4f} ".format(avg_data_time) - if avg_data_time is not None - else "", - last_data_time="last_data_time: {:.4f} ".format(last_data_time) - if last_data_time is not None - else "", + avg_time=( + "time: {:.4f} ".format(avg_iter_time) if avg_iter_time is not None else "" + ), + last_time=( + "last_time: {:.4f} ".format(last_iter_time) + if last_iter_time is not None + else "" + ), + avg_data_time=( + "data_time: {:.4f} ".format(avg_data_time) if avg_data_time is not None else "" + ), + last_data_time=( + "last_data_time: {:.4f} ".format(last_data_time) + if last_data_time is not None + else "" + ), lr=lr, memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", ) @@ -461,9 +465,11 @@ def latest_with_smoothing_hint(self, window_size=20): result = {} for k, (v, itr) in self._latest_scalars.items(): result[k] = ( - self._history[k].median(self.count_samples(k, window_size)) - if self._smoothing_hints[k] - else v, + ( + self._history[k].median(self.count_samples(k, window_size)) + if self._smoothing_hints[k] + else v + ), itr, ) return result diff --git a/detectron2/utils/visualizer.py b/detectron2/utils/visualizer.py index 8324dfcdf6..bb6c24ee97 100644 --- a/detectron2/utils/visualizer.py +++ b/detectron2/utils/visualizer.py @@ -377,9 +377,10 @@ def __init__( self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 - self._default_font_size = max( - np.sqrt(self.output.height * self.output.width) // 90, 10 // scale - ) * font_size_scale + self._default_font_size = ( + max(np.sqrt(self.output.height * self.output.width) // 90, 10 // scale) + * 
font_size_scale + ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD @@ -410,13 +411,14 @@ def draw_instance_predictions(self, predictions, jittering: bool = True): masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes - ] if jittering else [ - tuple( - mplc.to_rgb([x / 255 for x in self.metadata.thing_colors[c]]) - ) for c in classes - ] + colors = ( + [self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes] + if jittering + else [ + tuple(mplc.to_rgb([x / 255 for x in self.metadata.thing_colors[c]])) + for c in classes + ] + ) alpha = 0.8 else: @@ -568,9 +570,11 @@ def draw_dataset_dict(self, dic): keypts = None boxes = [ - BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) - if len(x["bbox"]) == 4 - else x["bbox"] + ( + BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) + if len(x["bbox"]) == 4 + else x["bbox"] + ) for x in annos ] diff --git a/dev/linter.sh b/dev/linter.sh index 55793e0181..fb1d514e71 100755 --- a/dev/linter.sh +++ b/dev/linter.sh @@ -5,9 +5,9 @@ cd "$(dirname "${BASH_SOURCE[0]}")/.." { - black --version | grep -E "22\." > /dev/null + black --version | grep -E "24\." > /dev/null } || { - echo "Linter requires 'black==22.*' !" + echo "Linter requires 'black==24.*' !" exit 1 } diff --git a/projects/DeepLab/train_net.py b/projects/DeepLab/train_net.py index b3f514ea57..3de57fa028 100755 --- a/projects/DeepLab/train_net.py +++ b/projects/DeepLab/train_net.py @@ -12,18 +12,9 @@ import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg -from detectron2.data import build_detection_train_loader, DatasetMapper, MetadataCatalog -from detectron2.engine import ( - default_argument_parser, - default_setup, - DefaultTrainer, - launch, -) -from detectron2.evaluation import ( - CityscapesSemSegEvaluator, - DatasetEvaluators, - SemSegEvaluator, -) +from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import CityscapesSemSegEvaluator, DatasetEvaluators, SemSegEvaluator from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler @@ -89,9 +80,7 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None): @classmethod def build_train_loader(cls, cfg): if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE: - mapper = DatasetMapper( - cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg) - ) + mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg)) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) diff --git a/projects/DensePose/densepose/data/build.py b/projects/DensePose/densepose/data/build.py index e9ae5d595a..06e8e8f782 100644 --- a/projects/DensePose/densepose/data/build.py +++ b/projects/DensePose/densepose/data/build.py @@ -484,11 +484,11 @@ def build_detection_test_loader(cfg, dataset_name, mapper=None): dataset_dicts = combine_detection_dataset_dicts( [dataset_name], keep_instance_predicate=_get_test_keep_instance_predicate(cfg), - proposal_files=[ - cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)] - ] - if cfg.MODEL.LOAD_PROPOSALS - else None, + proposal_files=( + 
[cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]] + if cfg.MODEL.LOAD_PROPOSALS + else None + ), ) sampler = None if not cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE: diff --git a/projects/DensePose/densepose/modeling/losses/embed_utils.py b/projects/DensePose/densepose/modeling/losses/embed_utils.py index 894417fe1a..92210f002c 100644 --- a/projects/DensePose/densepose/modeling/losses/embed_utils.py +++ b/projects/DensePose/densepose/modeling/losses/embed_utils.py @@ -126,9 +126,9 @@ def pack(self) -> Optional[PackedCseAnnotations]: vertex_mesh_ids_gt=torch.cat(self.vertex_mesh_ids_gt, 0), vertex_ids_gt=torch.cat(self.vertex_ids_gt, 0), # ignore segmentation annotations, if not all the instances contain those - coarse_segm_gt=torch.cat(self.s_gt, 0) - if len(self.s_gt) == len(self.bbox_xywh_gt) - else None, + coarse_segm_gt=( + torch.cat(self.s_gt, 0) if len(self.s_gt) == len(self.bbox_xywh_gt) else None + ), bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0), bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0), point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0), diff --git a/projects/DensePose/densepose/modeling/losses/utils.py b/projects/DensePose/densepose/modeling/losses/utils.py index 8193c537c1..f865798760 100644 --- a/projects/DensePose/densepose/modeling/losses/utils.py +++ b/projects/DensePose/densepose/modeling/losses/utils.py @@ -404,9 +404,9 @@ def pack(self) -> Optional[PackedChartBasedAnnotations]: u_gt=torch.cat(self.u_gt, 0), v_gt=torch.cat(self.v_gt, 0), # ignore segmentation annotations, if not all the instances contain those - coarse_segm_gt=torch.cat(self.s_gt, 0) - if len(self.s_gt) == len(self.bbox_xywh_gt) - else None, + coarse_segm_gt=( + torch.cat(self.s_gt, 0) if len(self.s_gt) == len(self.bbox_xywh_gt) else None + ), bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0), bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0), point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0).long(), diff --git a/projects/DensePose/densepose/modeling/roi_heads/deeplab.py b/projects/DensePose/densepose/modeling/roi_heads/deeplab.py index df2c13fa2f..6f42d20681 100644 --- a/projects/DensePose/densepose/modeling/roi_heads/deeplab.py +++ b/projects/DensePose/densepose/modeling/roi_heads/deeplab.py @@ -130,7 +130,7 @@ def __init__(self, in_channels, atrous_rates, out_channels): self.project = nn.Sequential( nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), # nn.BatchNorm2d(out_channels), - nn.ReLU() + nn.ReLU(), # nn.Dropout(0.5) ) diff --git a/projects/DensePose/train_net.py b/projects/DensePose/train_net.py index fbefc9b03b..6c06011830 100755 --- a/projects/DensePose/train_net.py +++ b/projects/DensePose/train_net.py @@ -12,22 +12,16 @@ from datetime import timedelta import detectron2.utils.comm as comm - -from densepose import add_densepose_config -from densepose.engine import Trainer -from densepose.modeling.densepose_checkpoint import DensePoseCheckpointer from detectron2.config import get_cfg -from detectron2.engine import ( - default_argument_parser, - default_setup, - DEFAULT_TIMEOUT, - hooks, - launch, -) +from detectron2.engine import DEFAULT_TIMEOUT, default_argument_parser, default_setup, hooks, launch from detectron2.evaluation import verify_results from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger +from densepose import add_densepose_config +from densepose.engine import Trainer +from densepose.modeling.densepose_checkpoint import DensePoseCheckpointer + def 
setup(args): cfg = get_cfg() @@ -37,9 +31,7 @@ def setup(args): cfg.freeze() default_setup(cfg, args) # Setup logger for "densepose" module - setup_logger( - output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="densepose" - ) + setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="densepose") return cfg @@ -74,9 +66,7 @@ def invoke_main() -> None: args = default_argument_parser().parse_args() cfg = setup(args) timeout = ( - DEFAULT_TIMEOUT - if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE - else timedelta(hours=4) + DEFAULT_TIMEOUT if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE else timedelta(hours=4) ) print("Command Line Args:", args) launch( diff --git a/projects/Panoptic-DeepLab/train_net.py b/projects/Panoptic-DeepLab/train_net.py index 0a25e60323..ac6addcffb 100644 --- a/projects/Panoptic-DeepLab/train_net.py +++ b/projects/Panoptic-DeepLab/train_net.py @@ -7,18 +7,13 @@ """ import os +import torch import detectron2.data.transforms as T -import torch from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg -from detectron2.data import build_detection_train_loader, MetadataCatalog -from detectron2.engine import ( - default_argument_parser, - default_setup, - DefaultTrainer, - launch, -) +from detectron2.data import MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, @@ -28,8 +23,8 @@ ) from detectron2.projects.deeplab import build_lr_scheduler from detectron2.projects.panoptic_deeplab import ( - add_panoptic_deeplab_config, PanopticDeeplabDatasetMapper, + add_panoptic_deeplab_config, ) from detectron2.solver import get_default_optimizer_params from detectron2.solver.build import maybe_add_gradient_clipping @@ -86,9 +81,7 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None): "coco_2017_val_100_panoptic": "coco_2017_val_100", } evaluator_list.append( - COCOEvaluator( - dataset_name_mapper[dataset_name], output_dir=output_folder - ) + COCOEvaluator(dataset_name_mapper[dataset_name], output_dir=output_folder) ) if len(evaluator_list) == 0: raise NotImplementedError( @@ -102,9 +95,7 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None): @classmethod def build_train_loader(cls, cfg): - mapper = PanopticDeeplabDatasetMapper( - cfg, augmentations=build_sem_seg_train_aug(cfg) - ) + mapper = PanopticDeeplabDatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg)) return build_detection_train_loader(cfg, mapper=mapper) @classmethod @@ -135,9 +126,7 @@ def build_optimizer(cls, cfg, model): nesterov=cfg.SOLVER.NESTEROV, ) elif optimizer_type == "ADAM": - return maybe_add_gradient_clipping(cfg, torch.optim.Adam)( - params, cfg.SOLVER.BASE_LR - ) + return maybe_add_gradient_clipping(cfg, torch.optim.Adam)(params, cfg.SOLVER.BASE_LR) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") diff --git a/projects/PointRend/train_net.py b/projects/PointRend/train_net.py index 418ae9f866..a490658e1e 100755 --- a/projects/PointRend/train_net.py +++ b/projects/PointRend/train_net.py @@ -13,13 +13,8 @@ import detectron2.utils.comm as comm from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg -from detectron2.data import build_detection_train_loader, DatasetMapper, MetadataCatalog -from detectron2.engine import ( - default_argument_parser, - default_setup, - DefaultTrainer, 
- launch, -) +from detectron2.data import DatasetMapper, MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, @@ -29,7 +24,7 @@ SemSegEvaluator, verify_results, ) -from detectron2.projects.point_rend import add_pointrend_config, ColorAugSSDTransform +from detectron2.projects.point_rend import ColorAugSSDTransform, add_pointrend_config def build_sem_seg_train_aug(cfg): @@ -102,9 +97,7 @@ def build_evaluator(cls, cfg, dataset_name, output_folder=None): @classmethod def build_train_loader(cls, cfg): if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE: - mapper = DatasetMapper( - cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg) - ) + mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg)) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) diff --git a/projects/PointSup/train_net.py b/projects/PointSup/train_net.py index 05291fa092..68f86c9cf4 100755 --- a/projects/PointSup/train_net.py +++ b/projects/PointSup/train_net.py @@ -11,18 +11,13 @@ import detectron2.utils.comm as comm from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg -from detectron2.data import build_detection_train_loader, MetadataCatalog -from detectron2.engine import ( - default_argument_parser, - default_setup, - DefaultTrainer, - launch, -) +from detectron2.data import MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results from detectron2.projects.point_rend import add_pointrend_config from detectron2.utils.logger import setup_logger -from point_sup import add_point_sup_config, PointSupDatasetMapper +from point_sup import PointSupDatasetMapper, add_point_sup_config class Trainer(DefaultTrainer): @@ -78,9 +73,7 @@ def setup(args): cfg.freeze() default_setup(cfg, args) # Setup logger for "point_sup" module - setup_logger( - output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="point_sup" - ) + setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="point_sup") return cfg diff --git a/projects/TensorMask/train_net.py b/projects/TensorMask/train_net.py index ff481797bd..aeb2cd2fc2 100755 --- a/projects/TensorMask/train_net.py +++ b/projects/TensorMask/train_net.py @@ -12,12 +12,7 @@ import detectron2.utils.comm as comm from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg -from detectron2.engine import ( - default_argument_parser, - default_setup, - DefaultTrainer, - launch, -) +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.evaluation import COCOEvaluator, verify_results from tensormask import add_tensormask_config diff --git a/projects/TridentNet/train_net.py b/projects/TridentNet/train_net.py index a34c3fdc73..5e5c5fe43a 100755 --- a/projects/TridentNet/train_net.py +++ b/projects/TridentNet/train_net.py @@ -11,12 +11,7 @@ from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg -from detectron2.engine import ( - default_argument_parser, - default_setup, - DefaultTrainer, - launch, -) +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch from detectron2.evaluation import 
COCOEvaluator from tridentnet import add_tridentnet_config diff --git a/tools/analyze_model.py b/tools/analyze_model.py index 543b14e80b..ba177d7cd9 100755 --- a/tools/analyze_model.py +++ b/tools/analyze_model.py @@ -2,23 +2,22 @@ # Copyright (c) Facebook, Inc. and its affiliates. import logging -from collections import Counter - import numpy as np +from collections import Counter import tqdm +from fvcore.nn import flop_count_table # can also try flop_count_str from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import CfgNode, get_cfg, instantiate, LazyConfig +from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate from detectron2.data import build_detection_test_loader from detectron2.engine import default_argument_parser from detectron2.modeling import build_model from detectron2.utils.analysis import ( - activation_count_operators, FlopCountAnalysis, + activation_count_operators, parameter_count_table, ) from detectron2.utils.logger import setup_logger -from fvcore.nn import flop_count_table # can also try flop_count_str logger = logging.getLogger("detectron2") @@ -59,17 +58,13 @@ def do_flop(cfg): counts += flops.by_operator() total_flops.append(flops.total()) - logger.info( - "Flops table computed from only one input sample:\n" + flop_count_table(flops) - ) + logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops)) logger.info( "Average GFlops for each type of operators:\n" + str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()]) ) logger.info( - "Total GFlops: {:.1f}±{:.1f}".format( - np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9 - ) + "Total GFlops: {:.1f}±{:.1f}".format(np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9) ) diff --git a/tools/benchmark.py b/tools/benchmark.py index 418a723f64..c80c8ca76f 100755 --- a/tools/benchmark.py +++ b/tools/benchmark.py @@ -8,34 +8,27 @@ import itertools import logging - import psutil import torch import tqdm +from fvcore.common.timer import Timer +from torch.nn.parallel import DistributedDataParallel from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import get_cfg, instantiate, LazyConfig +from detectron2.config import LazyConfig, get_cfg, instantiate from detectron2.data import ( + DatasetFromList, build_detection_test_loader, build_detection_train_loader, - DatasetFromList, ) from detectron2.data.benchmark import DataLoaderBenchmark -from detectron2.engine import ( - AMPTrainer, - default_argument_parser, - hooks, - launch, - SimpleTrainer, -) +from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch from detectron2.modeling import build_model from detectron2.solver import build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.events import CommonMetricPrinter from detectron2.utils.logger import setup_logger -from fvcore.common.timer import Timer -from torch.nn.parallel import DistributedDataParallel logger = logging.getLogger("detectron2") @@ -124,9 +117,7 @@ def f(): yield from data max_iter = 400 - trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)( - model, f(), optimizer - ) + trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(model, f(), optimizer) trainer.register_hooks( [ hooks.IterationTimer(), @@ -183,9 +174,7 @@ def f(): def main() -> None: parser = default_argument_parser() - parser.add_argument( - "--task", choices=["train", "eval", "data", "data_advanced"], 
required=True - ) + parser.add_argument("--task", choices=["train", "eval", "data", "data_advanced"], required=True) args = parser.parse_args() assert not args.eval_only diff --git a/tools/deploy/export_model.py b/tools/deploy/export_model.py index 041f912427..560143d777 100755 --- a/tools/deploy/export_model.py +++ b/tools/deploy/export_model.py @@ -3,27 +3,27 @@ import argparse import os from typing import Dict, List, Tuple +import torch +from torch import Tensor, nn import detectron2.data.transforms as T -import torch from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import build_detection_test_loader, detection_utils from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format from detectron2.export import ( - dump_torchscript_IR, - scripting_with_instances, STABLE_ONNX_OPSET_VERSION, TracingAdapter, + dump_torchscript_IR, + scripting_with_instances, ) -from detectron2.modeling import build_model, GeneralizedRCNN, RetinaNet +from detectron2.modeling import GeneralizedRCNN, RetinaNet, build_model from detectron2.modeling.postprocessing import detector_postprocess from detectron2.projects.point_rend import add_pointrend_config from detectron2.structures import Boxes from detectron2.utils.env import TORCH_VERSION from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger -from torch import nn, Tensor def setup_cfg(args): @@ -85,18 +85,14 @@ def __init__(self): if isinstance(torch_model, GeneralizedRCNN): class ScriptableAdapter(ScriptableAdapterBase): - def forward( - self, inputs: Tuple[Dict[str, torch.Tensor]] - ) -> List[Dict[str, Tensor]]: + def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]: instances = self.model.inference(inputs, do_postprocess=False) return [i.get_fields() for i in instances] else: class ScriptableAdapter(ScriptableAdapterBase): - def forward( - self, inputs: Tuple[Dict[str, torch.Tensor]] - ) -> List[Dict[str, Tensor]]: + def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]: instances = self.model(inputs) return [i.get_fields() for i in instances] @@ -133,9 +129,7 @@ def inference(model, inputs): dump_torchscript_IR(ts_model, args.output) elif args.format == "onnx": with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f: - torch.onnx.export( - traceable_model, (image,), f, opset_version=STABLE_ONNX_OPSET_VERSION - ) + torch.onnx.export(traceable_model, (image,), f, opset_version=STABLE_ONNX_OPSET_VERSION) logger.info("Inputs schema: " + str(traceable_model.inputs_schema)) logger.info("Outputs schema: " + str(traceable_model.outputs_schema)) @@ -150,9 +144,7 @@ def eval_wrapper(inputs): unused in deployment but needed for evaluation. We add it manually here. 
""" input = inputs[0] - instances = traceable_model.outputs_schema(ts_model(input["image"]))[0][ - "instances" - ] + instances = traceable_model.outputs_schema(ts_model(input["image"]))[0]["instances"] postprocessed = detector_postprocess(instances, input["height"], input["width"]) return [{"instances": postprocessed}] @@ -168,9 +160,7 @@ def get_sample_inputs(args): return first_batch else: # get a sample data - original_image = detection_utils.read_image( - args.sample_image, format=cfg.INPUT.FORMAT - ) + original_image = detection_utils.read_image(args.sample_image, format=cfg.INPUT.FORMAT) # Do same preprocessing as DefaultPredictor aug = T.ResizeShortestEdge( [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST @@ -201,12 +191,8 @@ def main() -> None: help="Method to export models", default="tracing", ) - parser.add_argument( - "--config-file", default="", metavar="FILE", help="path to config file" - ) - parser.add_argument( - "--sample-image", default=None, type=str, help="sample image for input" - ) + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument("--sample-image", default=None, type=str, help="sample image for input") parser.add_argument("--run-eval", action="store_true") parser.add_argument("--output", help="output directory for the converted model") parser.add_argument( @@ -245,9 +231,7 @@ def main() -> None: "Python inference is not yet implemented for " f"export_method={args.export_method}, format={args.format}." ) - logger.info( - "Running evaluation ... this takes a long time if you export to CPU." - ) + logger.info("Running evaluation ... this takes a long time if you export to CPU.") dataset = cfg.DATASETS.TEST[0] data_loader = build_detection_test_loader(cfg, dataset) # NOTE: hard-coded evaluator. 
change to the evaluator for your dataset diff --git a/tools/lazyconfig_train_net.py b/tools/lazyconfig_train_net.py index 27f0cc056b..59ae5c887a 100755 --- a/tools/lazyconfig_train_net.py +++ b/tools/lazyconfig_train_net.py @@ -15,15 +15,15 @@ import logging from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import instantiate, LazyConfig +from detectron2.config import LazyConfig, instantiate from detectron2.engine import ( AMPTrainer, + SimpleTrainer, default_argument_parser, default_setup, default_writers, hooks, launch, - SimpleTrainer, ) from detectron2.engine.defaults import create_ddp_model from detectron2.evaluation import inference_on_dataset, print_csv_format @@ -73,9 +73,7 @@ def do_train(args, cfg): train_loader = instantiate(cfg.dataloader.train) model = create_ddp_model(model, **cfg.train.ddp) - trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)( - model, train_loader, optim - ) + trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(model, train_loader, optim) checkpointer = DetectionCheckpointer( model, cfg.train.output_dir, @@ -85,16 +83,20 @@ def do_train(args, cfg): [ hooks.IterationTimer(), hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)), - hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer) - if comm.is_main_process() - else None, + ( + hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer) + if comm.is_main_process() + else None + ), hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)), - hooks.PeriodicWriter( - default_writers(cfg.train.output_dir, cfg.train.max_iter), - period=cfg.train.log_period, - ) - if comm.is_main_process() - else None, + ( + hooks.PeriodicWriter( + default_writers(cfg.train.output_dir, cfg.train.max_iter), + period=cfg.train.log_period, + ) + if comm.is_main_process() + else None + ), ] ) diff --git a/tools/lightning_train_net.py b/tools/lightning_train_net.py index c711a7173b..87cfe84feb 100644 --- a/tools/lightning_train_net.py +++ b/tools/lightning_train_net.py @@ -11,19 +11,20 @@ import weakref from collections import OrderedDict from typing import Any, Dict, List +import pytorch_lightning as pl # type: ignore +from pytorch_lightning import LightningDataModule, LightningModule import detectron2.utils.comm as comm -import pytorch_lightning as pl # type: ignore from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import build_detection_test_loader, build_detection_train_loader from detectron2.engine import ( + DefaultTrainer, + SimpleTrainer, default_argument_parser, default_setup, default_writers, - DefaultTrainer, hooks, - SimpleTrainer, ) from detectron2.evaluation import print_csv_format from detectron2.evaluation.testing import flatten_results_dict @@ -31,7 +32,6 @@ from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils.events import EventStorage from detectron2.utils.logger import setup_logger -from pytorch_lightning import LightningDataModule, LightningModule from train_net import build_evaluator @@ -65,9 +65,7 @@ def setup(self, stage: str): self.model, self.cfg.OUTPUT_DIR, ) - logger.info( - f"Load model weights from checkpoint: {self.cfg.MODEL.WEIGHTS}." 
-        )
+        logger.info(f"Load model weights from checkpoint: {self.cfg.MODEL.WEIGHTS}.")
         # Only load weights, use lightning checkpointing if you want to resume
         self.checkpointer.load(self.cfg.MODEL.WEIGHTS)
 
@@ -197,9 +195,7 @@ def train(cfg, args):
         # sure max_steps is met first
         "max_epochs": 10**8,
         "max_steps": cfg.SOLVER.MAX_ITER,
-        "val_check_interval": cfg.TEST.EVAL_PERIOD
-        if cfg.TEST.EVAL_PERIOD > 0
-        else 10**8,
+        "val_check_interval": cfg.TEST.EVAL_PERIOD if cfg.TEST.EVAL_PERIOD > 0 else 10**8,
         "num_nodes": args.num_machines,
         "gpus": args.num_gpus,
         "num_sanity_val_steps": 0,
@@ -214,9 +210,7 @@ def train(cfg, args):
         logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")
 
     trainer = pl.Trainer(**trainer_params)
-    logger.info(
-        f"start to train with {args.num_machines} nodes and {args.num_gpus} GPUs"
-    )
+    logger.info(f"start to train with {args.num_machines} nodes and {args.num_gpus} GPUs")
 
     module = TrainingModule(cfg)
     data_module = DataModule(cfg)
diff --git a/tools/plain_train_net.py b/tools/plain_train_net.py
index 89de94541d..0c37336889 100755
--- a/tools/plain_train_net.py
+++ b/tools/plain_train_net.py
@@ -22,38 +22,33 @@
 import logging
 import os
 from collections import OrderedDict
 
+import torch
+from torch.nn.parallel import DistributedDataParallel
 import detectron2.utils.comm as comm
-import torch
 from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
 from detectron2.config import get_cfg
 from detectron2.data import (
+    MetadataCatalog,
     build_detection_test_loader,
     build_detection_train_loader,
-    MetadataCatalog,
-)
-from detectron2.engine import (
-    default_argument_parser,
-    default_setup,
-    default_writers,
-    launch,
 )
+from detectron2.engine import default_argument_parser, default_setup, default_writers, launch
 from detectron2.evaluation import (
     CityscapesInstanceEvaluator,
     CityscapesSemSegEvaluator,
     COCOEvaluator,
     COCOPanopticEvaluator,
     DatasetEvaluators,
-    inference_on_dataset,
     LVISEvaluator,
     PascalVOCDetectionEvaluator,
-    print_csv_format,
     SemSegEvaluator,
+    inference_on_dataset,
+    print_csv_format,
 )
 from detectron2.modeling import build_model
 from detectron2.solver import build_lr_scheduler, build_optimizer
 from detectron2.utils.events import EventStorage
-from torch.nn.parallel import DistributedDataParallel
 
 logger = logging.getLogger("detectron2")
 
@@ -91,9 +86,7 @@ def get_evaluator(cfg, dataset_name, output_folder=None):
         return LVISEvaluator(dataset_name, cfg, True, output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
-            "no Evaluator for the dataset {} with the type {}".format(
-                dataset_name, evaluator_type
-            )
+            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
         )
     if len(evaluator_list) == 1:
         return evaluator_list[0]
@@ -126,10 +119,7 @@ def do_train(cfg, model, resume=False):
         model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
     )
     start_iter = (
-        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get(
-            "iteration", -1
-        )
-        + 1
+        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
     )
     max_iter = cfg.SOLVER.MAX_ITER
 
@@ -137,9 +127,7 @@
         checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
     )
 
-    writers = (
-        default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
-    )
+    writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
 
     # compared to "train_net.py", we do not support accurate timing and
     # precise BN here, because they are not trivial to implement in a small training loop
@@ -153,9 +141,7 @@ def do_train(cfg, model, resume=False):
             losses = sum(loss_dict.values())
             assert torch.isfinite(losses).all(), loss_dict
 
-            loss_dict_reduced = {
-                k: v.item() for k, v in comm.reduce_dict(loss_dict).items()
-            }
+            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
             losses_reduced = sum(loss for loss in loss_dict_reduced.values())
             if comm.is_main_process():
                 storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
@@ -163,9 +149,7 @@ def do_train(cfg, model, resume=False):
             optimizer.zero_grad()
             losses.backward()
             optimizer.step()
-            storage.put_scalar(
-                "lr", optimizer.param_groups[0]["lr"], smoothing_hint=False
-            )
+            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
             scheduler.step()
 
             if (
diff --git a/tools/train_net.py b/tools/train_net.py
index 6f5aa669dc..a82a8dfb5f 100755
--- a/tools/train_net.py
+++ b/tools/train_net.py
@@ -24,13 +24,7 @@
 from detectron2.checkpoint import DetectionCheckpointer
 from detectron2.config import get_cfg
 from detectron2.data import MetadataCatalog
-from detectron2.engine import (
-    default_argument_parser,
-    default_setup,
-    DefaultTrainer,
-    hooks,
-    launch,
-)
+from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
 from detectron2.evaluation import (
     CityscapesInstanceEvaluator,
     CityscapesSemSegEvaluator,
@@ -78,9 +72,7 @@ def build_evaluator(cfg, dataset_name, output_folder=None):
         return LVISEvaluator(dataset_name, output_dir=output_folder)
     if len(evaluator_list) == 0:
         raise NotImplementedError(
-            "no Evaluator for the dataset {} with the type {}".format(
-                dataset_name, evaluator_type
-            )
+            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
         )
     elif len(evaluator_list) == 1:
         return evaluator_list[0]
diff --git a/tools/visualize_data.py b/tools/visualize_data.py
index 7e585df081..25d569cd95 100755
--- a/tools/visualize_data.py
+++ b/tools/visualize_data.py
@@ -3,17 +3,12 @@
 import argparse
 import os
 from itertools import chain
-
 import cv2
 import tqdm
 
 from detectron2.config import get_cfg
-from detectron2.data import (
-    build_detection_train_loader,
-    DatasetCatalog,
-    detection_utils as utils,
-    MetadataCatalog,
-)
+from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
+from detectron2.data import detection_utils as utils
 from detectron2.data.build import filter_images_with_few_keypoints
 from detectron2.utils.logger import setup_logger
 from detectron2.utils.visualizer import Visualizer
@@ -81,9 +76,7 @@ def output(vis, fname):
 
             visualizer = Visualizer(img, metadata=metadata, scale=scale)
             target_fields = per_image["instances"].get_fields()
-            labels = [
-                metadata.thing_classes[i] for i in target_fields["gt_classes"]
-            ]
+            labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
             vis = visualizer.overlay_instances(
                 labels=labels,
                 boxes=target_fields.get("gt_boxes", None),
@@ -92,9 +85,7 @@ def output(vis, fname):
                 masks=target_fields.get("gt_masks", None),
                 keypoints=target_fields.get("gt_keypoints", None),
             )
             output(vis, str(per_image["image_id"]) + ".jpg")
     else:
-        dicts = list(
-            chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN])
-        )
+        dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
         if cfg.MODEL.KEYPOINT_ON:
             dicts = filter_images_with_few_keypoints(dicts, 1)
         for dic in tqdm.tqdm(dicts):
diff --git a/tools/visualize_json_results.py b/tools/visualize_json_results.py
index 9d374aa1e1..e32d80e35b 100755
--- a/tools/visualize_json_results.py
+++ b/tools/visualize_json_results.py
@@ -3,11 +3,10 @@
 import argparse
 import json
+import numpy as np
 import os
 from collections import defaultdict
-
 import cv2
-import numpy as np
 import tqdm
 
 from detectron2.data import DatasetCatalog, MetadataCatalog
@@ -44,19 +43,13 @@ def main() -> None:
     parser = argparse.ArgumentParser(
         description="A script that visualizes the json predictions from COCO or LVIS dataset."
     )
-    parser.add_argument(
-        "--input", required=True, help="JSON file produced by the model"
-    )
+    parser.add_argument("--input", required=True, help="JSON file produced by the model")
     parser.add_argument("--output", required=True, help="output directory")
-    parser.add_argument(
-        "--dataset", help="name of the dataset", default="coco_2017_val"
-    )
-    parser.add_argument(
-        "--conf-threshold", default=0.5, type=float, help="confidence threshold"
-    )
+    parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
+    parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
     args = parser.parse_args()
 
-    logger = setup_logger()
+    setup_logger()
 
     with PathManager.open(args.input, "r") as f:
         predictions = json.load(f)