From 275ca9b44d68ca0cb910c08156560a4ee23caab5 Mon Sep 17 00:00:00 2001 From: dnth Date: Fri, 21 Jan 2022 16:07:20 +0800 Subject: [PATCH 1/3] updated configs to 2.20.0 --- configs/albu_example/README.md | 16 ++ configs/atss/README.md | 16 +- configs/autoassign/README.md | 16 +- configs/carafe/README.md | 16 +- configs/carafe/metafile.yml | 55 +++++ configs/cascade_rcnn/README.md | 16 +- configs/cascade_rpn/README.md | 32 ++- configs/centernet/README.md | 18 +- configs/centripetalnet/README.md | 18 +- configs/cityscapes/README.md | 19 +- configs/common/lsj_100e_coco_instance.py | 90 ++++++++ configs/cornernet/README.md | 18 +- configs/dcn/README.md | 18 +- ...k_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py | 0 ..._rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py | 0 configs/dcn/metafile.yml | 56 ++++- configs/deepfashion/README.md | 22 +- configs/deformable_detr/README.md | 18 +- configs/detectors/README.md | 18 +- configs/detr/README.md | 18 +- configs/double_heads/README.md | 16 +- configs/dynamic_rcnn/README.md | 16 +- configs/empirical_attention/README.md | 16 +- configs/fast_rcnn/README.md | 16 +- configs/faster_rcnn/README.md | 16 +- .../faster_rcnn_r50_caffe_fpn_90k_coco.py | 15 ++ .../faster_rcnn_r50_fpn_fp16_1x_coco.py | 3 + configs/faster_rcnn/metafile.yml | 110 ++++++---- configs/fcos/README.md | 30 ++- configs/fcos/metafile.yml | 14 +- configs/foveabox/README.md | 16 ++ configs/fp16/README.md | 24 --- .../fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py | 3 - .../fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py | 3 - configs/fp16/metafile.yml | 113 ---------- .../fp16/retinanet_r50_fpn_fp16_1x_coco.py | 3 - configs/fpg/README.md | 16 +- configs/free_anchor/README.md | 16 +- configs/fsaf/README.md | 16 ++ configs/gcnet/README.md | 22 +- configs/gfl/README.md | 16 +- configs/ghm/README.md | 16 +- configs/gn+ws/README.md | 16 +- configs/gn/README.md | 16 +- configs/grid_rcnn/README.md | 18 +- configs/groie/README.md | 42 ++-- configs/guided_anchoring/README.md | 16 +- configs/hrnet/README.md | 19 +- configs/htc/README.md | 16 +- configs/instaboost/README.md | 23 +- configs/lad/README.md | 50 +++++ configs/lad/lad_r101_paa_r50_fpn_coco_1x.py | 121 +++++++++++ configs/lad/lad_r50_paa_r101_fpn_coco_1x.py | 120 +++++++++++ configs/ld/README.md | 16 +- configs/libra_rcnn/README.md | 17 +- configs/lvis/README.md | 22 +- configs/mask_rcnn/README.md | 17 +- .../mask_rcnn_r50_fpn_fp16_1x_coco.py | 3 + configs/mask_rcnn/metafile.yml | 30 ++- configs/ms_rcnn/README.md | 16 +- configs/nas_fcos/README.md | 16 +- configs/nas_fpn/README.md | 16 +- configs/paa/README.md | 16 ++ configs/pafpn/README.md | 16 +- configs/panoptic_fpn/README.md | 16 +- .../panoptic_fpn_r50_fpn_1x_coco.py | 5 +- configs/pascal_voc/README.md | 20 +- configs/pascal_voc/ssd300_voc0712.py | 16 +- configs/pascal_voc/ssd512_voc0712.py | 21 +- configs/pisa/README.md | 16 +- configs/point_rend/README.md | 18 +- configs/pvt/README.md | 60 ++++++ configs/pvt/metafile.yml | 136 ++++++++++++ configs/pvt/retinanet_pvt-l_fpn_1x_coco.py | 7 + configs/pvt/retinanet_pvt-m_fpn_1x_coco.py | 6 + configs/pvt/retinanet_pvt-s_fpn_1x_coco.py | 6 + configs/pvt/retinanet_pvt-t_fpn_1x_coco.py | 16 ++ configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py | 17 ++ configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py | 7 + configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py | 8 + configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py | 8 + configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py | 13 ++ configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py | 14 ++ configs/queryinst/README.md | 40 ++++ 
configs/queryinst/metafile.yml | 100 +++++++++ ..._proposals_crop_mstrain_480-800_3x_coco.py | 7 + ...ryinst_r101_fpn_mstrain_480-800_3x_coco.py | 7 + .../queryinst/queryinst_r50_fpn_1x_coco.py | 138 ++++++++++++ ..._proposals_crop_mstrain_480-800_3x_coco.py | 54 +++++ ...eryinst_r50_fpn_mstrain_480-800_3x_coco.py | 23 ++ configs/regnet/README.md | 18 +- configs/reppoints/README.md | 25 ++- configs/res2net/README.md | 18 +- configs/resnest/README.md | 18 +- configs/resnest/metafile.yml | 2 +- configs/retinanet/README.md | 17 +- configs/retinanet/metafile.yml | 27 ++- .../retinanet/retinanet_r50_fpn_90k_coco.py | 15 ++ .../retinanet_r50_fpn_fp16_1x_coco.py | 3 + configs/rpn/README.md | 16 +- configs/sabl/README.md | 17 +- configs/scnet/README.md | 28 ++- configs/scnet/metafile.yml | 8 +- configs/scratch/README.md | 16 +- configs/seesaw_loss/README.md | 18 +- configs/seesaw_loss/metafile.yml | 203 ++++++++++++++++++ configs/selfsup_pretrain/README.md | 23 +- configs/solo/README.md | 56 +++++ .../decoupled_solo_light_r50_fpn_3x_coco.py | 63 ++++++ .../solo/decoupled_solo_r50_fpn_1x_coco.py | 28 +++ .../solo/decoupled_solo_r50_fpn_3x_coco.py | 25 +++ configs/solo/metafile.yml | 115 ++++++++++ configs/solo/solo_r50_fpn_1x_coco.py | 53 +++++ configs/solo/solo_r50_fpn_3x_coco.py | 28 +++ configs/sparse_rcnn/README.md | 16 +- configs/ssd/README.md | 16 +- configs/ssd/ssd300_coco.py | 16 +- configs/ssd/ssd512_coco.py | 16 +- .../ssdlite_mobilenetv2_scratch_600e_coco.py | 16 +- configs/strong_baselines/README.md | 18 ++ ..._fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py | 80 +++++++ ...syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py | 2 + ..._fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py | 6 + ..._fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py | 22 ++ ...syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py | 3 + ...0_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py | 5 + configs/swin/README.md | 44 ++++ ...n_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py | 7 +- .../mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py | 3 +- ...k_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py | 3 +- configs/swin/metafile.yml | 85 ++++++++ .../retinanet_swin-t-p4-w7_fpn_1x_coco.py | 29 +++ configs/tood/README.md | 44 ++++ configs/tood/metafile.yml | 95 ++++++++ ...od_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py | 7 + configs/tood/tood_r101_fpn_mstrain_2x_coco.py | 7 + configs/tood/tood_r50_fpn_1x_coco.py | 74 +++++++ .../tood/tood_r50_fpn_anchor_based_1x_coco.py | 2 + configs/tood/tood_r50_fpn_mstrain_2x_coco.py | 22 ++ ...1_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py | 7 + .../tood_x101_64x4d_fpn_mstrain_2x_coco.py | 16 ++ configs/tridentnet/README.md | 16 +- configs/vfnet/README.md | 36 ++-- configs/vfnet/metafile.yml | 16 +- configs/wider_face/README.md | 20 +- configs/yolact/README.md | 31 +-- configs/yolact/metafile.yml | 6 +- configs/yolact/yolact_r50_1x8_coco.py | 16 +- configs/yolo/README.md | 18 +- configs/yolo/yolov3_d53_320_273e_coco.py | 4 +- .../yolo/yolov3_d53_mstrain-416_273e_coco.py | 4 +- .../yolo/yolov3_d53_mstrain-608_273e_coco.py | 2 +- .../yolo/yolov3_mobilenetv2_320_300e_coco.py | 4 +- ...olov3_mobilenetv2_mstrain-416_300e_coco.py | 4 +- configs/yolof/README.md | 16 +- configs/yolox/README.md | 27 ++- configs/yolox/metafile.yml | 43 +++- configs/yolox/yolox_s_8x8_300e_coco.py | 81 ++++--- configs/yolox/yolox_tiny_8x8_300e_coco.py | 50 ++--- 159 files changed, 3803 insertions(+), 509 deletions(-) create mode 100644 configs/carafe/metafile.yml create mode 100644 configs/common/lsj_100e_coco_instance.py rename configs/{fp16 => 
dcn}/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py (100%) rename configs/{fp16 => dcn}/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py (100%) create mode 100644 configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py create mode 100644 configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py delete mode 100644 configs/fp16/README.md delete mode 100644 configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py delete mode 100644 configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py delete mode 100644 configs/fp16/metafile.yml delete mode 100644 configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py create mode 100644 configs/lad/README.md create mode 100644 configs/lad/lad_r101_paa_r50_fpn_coco_1x.py create mode 100644 configs/lad/lad_r50_paa_r101_fpn_coco_1x.py create mode 100644 configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py create mode 100644 configs/pvt/README.md create mode 100644 configs/pvt/metafile.yml create mode 100644 configs/pvt/retinanet_pvt-l_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvt-m_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvt-s_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvt-t_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py create mode 100644 configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py create mode 100644 configs/queryinst/README.md create mode 100644 configs/queryinst/metafile.yml create mode 100644 configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py create mode 100644 configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py create mode 100644 configs/queryinst/queryinst_r50_fpn_1x_coco.py create mode 100644 configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py create mode 100644 configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py create mode 100644 configs/retinanet/retinanet_r50_fpn_90k_coco.py create mode 100644 configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py create mode 100644 configs/seesaw_loss/metafile.yml create mode 100644 configs/solo/README.md create mode 100644 configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py create mode 100644 configs/solo/decoupled_solo_r50_fpn_1x_coco.py create mode 100644 configs/solo/decoupled_solo_r50_fpn_3x_coco.py create mode 100644 configs/solo/metafile.yml create mode 100644 configs/solo/solo_r50_fpn_1x_coco.py create mode 100644 configs/solo/solo_r50_fpn_3x_coco.py create mode 100644 configs/strong_baselines/README.md create mode 100644 configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py create mode 100644 configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py create mode 100644 configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py create mode 100644 configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py create mode 100644 configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py create mode 100644 configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py create mode 100644 configs/swin/README.md create mode 100644 configs/swin/metafile.yml create mode 100644 configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py create mode 100644 configs/tood/README.md create 
mode 100644 configs/tood/metafile.yml create mode 100644 configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py create mode 100644 configs/tood/tood_r101_fpn_mstrain_2x_coco.py create mode 100644 configs/tood/tood_r50_fpn_1x_coco.py create mode 100644 configs/tood/tood_r50_fpn_anchor_based_1x_coco.py create mode 100644 configs/tood/tood_r50_fpn_mstrain_2x_coco.py create mode 100644 configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py create mode 100644 configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py diff --git a/configs/albu_example/README.md b/configs/albu_example/README.md index b11ae56..7f0eff5 100644 --- a/configs/albu_example/README.md +++ b/configs/albu_example/README.md @@ -1,5 +1,21 @@ # Albu Example +## Abstract + + + +Data augmentation is a commonly used technique for increasing both the size and the diversity of labeled training sets by leveraging input transformations that preserve output labels. In computer vision domain, image augmentations have become a common implicit regularization technique to combat overfitting in deep convolutional neural networks and are ubiquitously used to improve performance. While most deep learning frameworks implement basic image transformations, the list is typically limited to some variations and combinations of flipping, rotating, scaling, and cropping. Moreover, the image processing speed varies in existing tools for image augmentation. We present Albumentations, a fast and flexible library for image augmentations with many various image transform operations available, that is also an easy-to-use wrapper around other augmentation libraries. We provide examples of image augmentations for different computer vision tasks and show that Albumentations is faster than other commonly used image augmentation tools on the most of commonly used image transformations. + + +
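Below is a hedged sketch of how an Albumentations transform can be attached to an MMDetection training pipeline through the `Albu` wrapper that the albu_example config is built around; the specific transforms and parameter values are illustrative choices, not the exact contents of this patch.

```python
# Illustrative sketch (assumed transform list and parameters): plugging
# Albumentations transforms into an mmdet data pipeline via the `Albu` wrapper.
albu_train_transforms = [
    dict(type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0,
         rotate_limit=0, p=0.5),
    dict(type='RandomBrightnessContrast', brightness_limit=0.2,
         contrast_limit=0.2, p=0.5),
]
albu_pipeline_step = dict(
    type='Albu',
    transforms=albu_train_transforms,
    bbox_params=dict(
        type='BboxParams',
        format='pascal_voc',
        label_fields=['gt_labels'],
        min_visibility=0.0,
        filter_lost_elements=True),
    keymap=dict(img='image', gt_bboxes='bboxes'),  # mmdet -> albumentations keys
    skip_img_without_anno=True)
```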
+ + + + +## Citation + ``` diff --git a/configs/atss/README.md b/configs/atss/README.md index 8aa7746..035964f 100644 --- a/configs/atss/README.md +++ b/configs/atss/README.md @@ -1,6 +1,20 @@ # Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection -## Introduction +## Abstract + + + +Object detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to statistical characteristics of object. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead. + + +
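A minimal NumPy sketch of the adaptive selection rule the abstract refers to: take candidate anchors nearest a ground-truth center and set the positive-IoU threshold to the mean plus standard deviation of their IoUs. This is a simplification of the full ATSS procedure, which also selects candidates per FPN level and checks that anchor centers lie inside the box.

```python
# Simplified ATSS-style positive selection for a single ground-truth box.
import numpy as np

def atss_select_positives(ious, center_dists, topk=9):
    """ious, center_dists: (num_anchors,) arrays for one GT box."""
    candidate_idx = np.argsort(center_dists)[:topk]   # anchors closest to the GT center
    cand_ious = ious[candidate_idx]
    thr = cand_ious.mean() + cand_ious.std()          # adaptive IoU threshold
    return candidate_idx[cand_ious >= thr]            # indices of positive anchors

# toy usage
ious = np.array([0.10, 0.45, 0.60, 0.30, 0.70, 0.05])
dists = np.array([5.0, 2.0, 1.0, 3.0, 0.5, 8.0])
print(atss_select_positives(ious, dists, topk=4))
```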
+ + + + +## Citation diff --git a/configs/autoassign/README.md b/configs/autoassign/README.md index 4d5f376..172071d 100644 --- a/configs/autoassign/README.md +++ b/configs/autoassign/README.md @@ -1,6 +1,20 @@ # AutoAssign: Differentiable Label Assignment for Dense Object Detection -## Introduction +## Abstract + + + +Determining positive/negative samples for object detection is known as label assignment. Here we present an anchor-free detector named AutoAssign. It requires little human knowledge and achieves appearance-aware through a fully differentiable weighting mechanism. During training, to both satisfy the prior distribution of data and adapt to category characteristics, we present Center Weighting to adjust the category-specific prior distributions. To adapt to object appearances, Confidence Weighting is proposed to adjust the specific assign strategy of each instance. The two weighting modules are then combined to generate positive and negative weights to adjust each location's confidence. Extensive experiments on the MS COCO show that our method steadily surpasses other best sampling strategies by large margins with various backbones. Moreover, our best model achieves 52.1% AP, outperforming all existing one-stage detectors. Besides, experiments on other datasets, e.g., PASCAL VOC, Objects365, and WiderFace, demonstrate the broad applicability of AutoAssign. + + +
+ + + + +## Citation diff --git a/configs/carafe/README.md b/configs/carafe/README.md index 768cb98..dca52e6 100644 --- a/configs/carafe/README.md +++ b/configs/carafe/README.md @@ -1,6 +1,20 @@ # CARAFE: Content-Aware ReAssembly of FEatures -## Introduction +## Abstract + + + +Feature upsampling is a key operation in a number of modern convolutional network architectures, e.g. feature pyramids. Its design is critical for dense prediction tasks such as object detection and semantic/instance segmentation. In this work, we propose Content-Aware ReAssembly of FEatures (CARAFE), a universal, lightweight and highly effective operator to fulfill this goal. CARAFE has several appealing properties: (1) Large field of view. Unlike previous works (e.g. bilinear interpolation) that only exploit sub-pixel neighborhood, CARAFE can aggregate contextual information within a large receptive field. (2) Content-aware handling. Instead of using a fixed kernel for all samples (e.g. deconvolution), CARAFE enables instance-specific content-aware handling, which generates adaptive kernels on-the-fly. (3) Lightweight and fast to compute. CARAFE introduces little computational overhead and can be readily integrated into modern network architectures. We conduct comprehensive evaluations on standard benchmarks in object detection, instance/semantic segmentation and inpainting. CARAFE shows consistent and substantial gains across all the tasks (1.2%, 1.3%, 1.8%, 1.1db respectively) with negligible computational overhead. It has great potential to serve as a strong building block for future research. It has great potential to serve as a strong building block for future research. + + +
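A hedged config-style sketch of swapping the FPN upsampler for the CARAFE operator via the `FPN_CARAFE` neck referenced in the metafile added by this patch; the keyword arguments below are written from memory and should be treated as assumptions rather than the exact config.

```python
# Sketch (assumed parameter names): content-aware upsampling in the FPN
# through the FPN_CARAFE neck instead of nearest-neighbour interpolation.
neck = dict(
    type='FPN_CARAFE',
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5,
    upsample_cfg=dict(
        type='carafe',
        up_kernel=5,        # size of the reassembly kernel
        up_group=1,
        encoder_kernel=3,   # conv size of the kernel predictor
        encoder_dilation=1))
```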
+ + + + +## Citation diff --git a/configs/carafe/metafile.yml b/configs/carafe/metafile.yml new file mode 100644 index 0000000..b58a3f6 --- /dev/null +++ b/configs/carafe/metafile.yml @@ -0,0 +1,55 @@ +Collections: + - Name: CARAFE + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - RPN + - FPN_CARAFE + - ResNet + - RoIPool + Paper: + URL: https://arxiv.org/abs/1905.02188 + Title: 'CARAFE: Content-Aware ReAssembly of FEatures' + README: configs/carafe/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/necks/fpn_carafe.py#L11 + Version: v2.12.0 + +Models: + - Name: faster_rcnn_r50_fpn_carafe_1x_coco + In Collection: CARAFE + Config: configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py + Metadata: + Training Memory (GB): 4.26 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.6 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 38.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth + + - Name: mask_rcnn_r50_fpn_carafe_1x_coco + In Collection: CARAFE + Config: configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py + Metadata: + Training Memory (GB): 4.31 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 39.3 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth diff --git a/configs/cascade_rcnn/README.md b/configs/cascade_rcnn/README.md index 7d18ab1..a88cfd7 100644 --- a/configs/cascade_rcnn/README.md +++ b/configs/cascade_rcnn/README.md @@ -1,6 +1,20 @@ # Cascade R-CNN: High Quality Object Detection and Instance Segmentation -## Introduction +## Abstract + + + +In object detection, the intersection over union (IoU) threshold is frequently used to define positives/negatives. The threshold used to train a detector defines its quality. While the commonly used threshold of 0.5 leads to noisy (low-quality) detections, detection performance frequently degrades for larger thresholds. This paradox of high-quality detection has two causes: 1) overfitting, due to vanishing positive samples for large thresholds, and 2) inference-time quality mismatch between detector and test hypotheses. A multi-stage object detection architecture, the Cascade R-CNN, composed of a sequence of detectors trained with increasing IoU thresholds, is proposed to address these problems. The detectors are trained sequentially, using the output of a detector as training set for the next. This resampling progressively improves hypotheses quality, guaranteeing a positive training set of equivalent size for all detectors and minimizing overfitting. The same cascade is applied at inference, to eliminate quality mismatches between hypotheses and detectors. An implementation of the Cascade R-CNN without bells or whistles achieves state-of-the-art performance on the COCO dataset, and significantly improves high-quality detection on generic and specific object detection datasets, including VOC, KITTI, CityPerson, and WiderFace. Finally, the Cascade R-CNN is generalized to instance segmentation, with nontrivial improvements over the Mask R-CNN. + + +
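The cascade described above translates into a per-stage training config in which each R-CNN stage assigns positives at a progressively higher IoU threshold (0.5, 0.6, 0.7 are the values used in the paper). The field names below follow the usual mmdet `train_cfg` layout and are a sketch, not copied from this patch.

```python
# Sketch of multi-stage training with increasing IoU thresholds.
train_cfg = dict(
    rcnn=[
        dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=iou,
                           neg_iou_thr=iou, min_pos_iou=iou),
             sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25))
        for iou in (0.5, 0.6, 0.7)  # one entry per cascade stage
    ])
```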
+ + + + +## Citation diff --git a/configs/cascade_rpn/README.md b/configs/cascade_rpn/README.md index aa7782c..06b25a5 100644 --- a/configs/cascade_rpn/README.md +++ b/configs/cascade_rpn/README.md @@ -1,4 +1,20 @@ -# Cascade RPN +# Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution + +## Abstract + + + +This paper considers an architecture referred to as Cascade Region Proposal Network (Cascade RPN) for improving the region-proposal quality and detection performance by systematically addressing the limitation of the conventional RPN that heuristically defines the anchors and aligns the features to the anchors. First, instead of using multiple anchors with predefined scales and aspect ratios, Cascade RPN relies on a single anchor per location and performs multi-stage refinement. Each stage is progressively more stringent in defining positive samples by starting out with an anchor-free metric followed by anchor-based metrics in the ensuing stages. Second, to attain alignment between the features and the anchors throughout the stages, adaptive convolution is proposed that takes the anchors in addition to the image features as its input and learns the sampled features guided by the anchors. A simple implementation of a two-stage Cascade RPN achieves AR 13.4 points higher than that of the conventional RPN, surpassing any existing region proposal methods. When adopting to Fast R-CNN and Faster R-CNN, Cascade RPN can improve the detection mAP by 3.1 and 3.5 points, respectively. + + +
+ + + + +## Citation @@ -17,13 +33,13 @@ We provide the code for reproducing experiment results of [Cascade RPN](https:// ### Region proposal performance -| Method | Backbone | Style | Mem (GB) | Train time (s/iter) | Inf time (fps) | AR 1000 | Download | -|:------:|:--------:|:-----:|:--------:|:-------------------:|:--------------:|:-------:|:--------------------------------------:| -| CRPN | R-50-FPN | caffe | - | - | - | 72.0 | [model](https://drive.google.com/file/d/1qxVdOnCgK-ee7_z0x6mvAir_glMu2Ihi/view?usp=sharing) | +| Method | Backbone | Style | Mem (GB) | Train time (s/iter) | Inf time (fps) | AR 1000 | Config | Download | +|:------:|:--------:|:-----:|:--------:|:-------------------:|:--------------:|:-------:|:-------:|:--------------------------------------:| +| CRPN | R-50-FPN | caffe | - | - | - | 72.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_r50_caffe_fpn_1x_coco/cascade_rpn_r50_caffe_fpn_1x_coco-7aa93cef.pth) | ### Detection performance -| Method | Proposal | Backbone | Style | Schedule | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Download | -|:-------------:|:-----------:|:--------:|:-------:|:--------:|:--------:|:-------------------:|:--------------:|:------:|:--------------------------------------------:| -| Fast R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 39.9 | [model](https://drive.google.com/file/d/1NmbnuY5VHi8I9FE8xnp5uNvh2i-t-6_L/view?usp=sharing) | -| Faster R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 40.4 | [model](https://drive.google.com/file/d/1dS3Q66qXMJpcuuQgDNkLp669E5w1UMuZ/view?usp=sharing) | +| Method | Proposal | Backbone | Style | Schedule | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Config | Download | +|:-------------:|:-----------:|:--------:|:-------:|:--------:|:--------:|:-------------------:|:--------------:|:------:|:-------:|:--------------------------------------------:| +| Fast R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth) | +| Faster R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth) | diff --git a/configs/centernet/README.md b/configs/centernet/README.md index a1fcf3b..37c18e7 100644 --- a/configs/centernet/README.md +++ b/configs/centernet/README.md @@ -1,6 +1,20 @@ -# CenterNet +# Objects as Points -## Introduction +## Abstract + + + +Detection identifies objects as axis-aligned boxes in an image. Most successful object detectors enumerate a nearly exhaustive list of potential object locations and classify each. This is wasteful, inefficient, and requires additional post-processing. In this paper, we take a different approach. We model an object as a single point --- the center point of its bounding box. 
Our detector uses keypoint estimation to find center points and regresses to all other object properties, such as size, 3D location, orientation, and even pose. Our center point based approach, CenterNet, is end-to-end differentiable, simpler, faster, and more accurate than corresponding bounding box based detectors. CenterNet achieves the best speed-accuracy trade-off on the MS COCO dataset, with 28.1% AP at 142 FPS, 37.4% AP at 52 FPS, and 45.1% AP with multi-scale testing at 1.4 FPS. We use the same approach to estimate 3D bounding box in the KITTI benchmark and human pose on the COCO keypoint dataset. Our method performs competitively with sophisticated multi-stage methods and runs in real-time. + + +
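A minimal PyTorch sketch of the center-point decoding the abstract describes: keep local maxima of the class heatmap (a 3x3 max-pool acts as the NMS substitute) and take the top-k peaks. Size/offset regression and the full post-processing are omitted, so treat this as a conceptual sketch only.

```python
import torch
import torch.nn.functional as F

def decode_centers(heatmap, k=100):
    """heatmap: (num_classes, H, W) tensor of center scores in [0, 1]."""
    hmax = F.max_pool2d(heatmap[None], 3, stride=1, padding=1)[0]
    peaks = heatmap * (hmax == heatmap)          # keep only local maxima
    scores, idx = peaks.flatten().topk(k)
    c, h, w = heatmap.shape
    cls = idx // (h * w)                         # class of each peak
    ys, xs = (idx % (h * w)) // w, idx % w       # peak coordinates
    return scores, cls, ys, xs

scores, cls, ys, xs = decode_centers(torch.rand(80, 128, 128), k=10)
```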
+ + + + +## Citation diff --git a/configs/centripetalnet/README.md b/configs/centripetalnet/README.md index 543cf06..f3d22a5 100644 --- a/configs/centripetalnet/README.md +++ b/configs/centripetalnet/README.md @@ -1,6 +1,20 @@ -# CentripetalNet +# CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection -## Introduction +## Abstract + + + +Keypoint-based detectors have achieved pretty-well performance. However, incorrect keypoint matching is still widespread and greatly affects the performance of the detector. In this paper, we propose CentripetalNet which uses centripetal shift to pair corner keypoints from the same instance. CentripetalNet predicts the position and the centripetal shift of the corner points and matches corners whose shifted results are aligned. Combining position information, our approach matches corner points more accurately than the conventional embedding approaches do. Corner pooling extracts information inside the bounding boxes onto the border. To make this information more aware at the corners, we design a cross-star deformable convolution network to conduct feature adaption. Furthermore, we explore instance segmentation on anchor-free detectors by equipping our CentripetalNet with a mask prediction module. On MS-COCO test-dev, our CentripetalNet not only outperforms all existing anchor-free detectors with an AP of 48.0% but also achieves comparable performance to the state-of-the-art instance segmentation approaches with a 40.2% MaskAP. + + +
+ + + + +## Citation diff --git a/configs/cityscapes/README.md b/configs/cityscapes/README.md index b790771..28310f1 100644 --- a/configs/cityscapes/README.md +++ b/configs/cityscapes/README.md @@ -1,4 +1,21 @@ -# Cityscapes Dataset +# The Cityscapes Dataset for Semantic Urban Scene Understanding + +## Abstract + + + +Visual understanding of complex urban street scenes is an enabling factor for a wide range of applications. Object detection has benefited enormously from large-scale datasets, especially in the context of deep learning. For semantic urban scene understanding, however, no current dataset adequately captures the complexity of real-world urban scenes. +To address this, we introduce Cityscapes, a benchmark suite and large-scale dataset to train and test approaches for pixel-level and instance-level semantic labeling. Cityscapes is comprised of a large, diverse set of stereo video sequences recorded in streets from 50 different cities. 5000 of these images have high quality pixel-level annotations; 20000 additional images have coarse annotations to enable methods that leverage large volumes of weakly-labeled data. Crucially, our effort exceeds previous attempts in terms of dataset size, annotation richness, scene variability, and complexity. Our accompanying empirical study provides an in-depth analysis of the dataset characteristics, as well as a performance evaluation of several state-of-the-art approaches based on our benchmark. + + +
+ + + + +## Citation diff --git a/configs/common/lsj_100e_coco_instance.py b/configs/common/lsj_100e_coco_instance.py new file mode 100644 index 0000000..cacf23d --- /dev/null +++ b/configs/common/lsj_100e_coco_instance.py @@ -0,0 +1,90 @@ +_base_ = '../_base_/default_runtime.py' +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +image_size = (1024, 1024) + +file_client_args = dict(backend='disk') +# comment out the code below to use different file client +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) + +train_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.1, 2.0), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=4, # simply change this from 2 to 16 for 50e - 400e training. + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=5, metric=['bbox', 'segm']) + +# optimizer assumes bs=64 +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) +optimizer_config = dict(grad_clip=None) + +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.067, + step=[22, 24]) +runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/configs/cornernet/README.md b/configs/cornernet/README.md index a060196..d7dc08c 100644 --- a/configs/cornernet/README.md +++ b/configs/cornernet/README.md @@ -1,6 +1,20 @@ -# CornerNet +# Cornernet: Detecting objects as paired keypoints -## Introduction +## Abstract + + + +We propose CornerNet, a new approach to object detection where we detect an object bounding box as a pair of keypoints, the top-left corner and the bottom-right corner, using a single convolution neural network. 
By detecting objects as paired keypoints, we eliminate the need for designing a set of anchor boxes commonly used in prior single-stage detectors. In addition to our novel formulation, we introduce corner pooling, a new type of pooling layer that helps the network better localize corners. Experiments show that CornerNet achieves a 42.2% AP on MS COCO, outperforming all existing one-stage detectors. + + +
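A conceptual PyTorch sketch of the corner pooling layer mentioned in the abstract: for the top-left corner map, each location takes the maximum over everything below it and everything to its right, and the two are summed. This mirrors the idea rather than the CUDA implementation used in the repository.

```python
import torch

def top_left_corner_pool(x):
    """x: (N, C, H, W) feature map."""
    top = torch.flip(torch.cummax(torch.flip(x, dims=[2]), dim=2).values, dims=[2])
    left = torch.flip(torch.cummax(torch.flip(x, dims=[3]), dim=3).values, dims=[3])
    return top + left

out = top_left_corner_pool(torch.rand(1, 8, 16, 16))
```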
+ + + + +## Citation diff --git a/configs/dcn/README.md b/configs/dcn/README.md index 044ff29..d9d23f0 100644 --- a/configs/dcn/README.md +++ b/configs/dcn/README.md @@ -1,6 +1,20 @@ # Deformable Convolutional Networks -## Introduction +## Abstract + + + +Convolutional neural networks (CNNs) are inherently limited to model geometric transformations due to the fixed geometric structures in its building modules. In this work, we introduce two new modules to enhance the transformation modeling capacity of CNNs, namely, deformable convolution and deformable RoI pooling. Both are based on the idea of augmenting the spatial sampling locations in the modules with additional offsets and learning the offsets from target tasks, without additional supervision. The new modules can readily replace their plain counterparts in existing CNNs and can be easily trained end-to-end by standard back-propagation, giving rise to deformable convolutional networks. Extensive experiments validate the effectiveness of our approach on sophisticated vision tasks of object detection and semantic segmentation. + + +
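The "dconv(c3-c5)" variants listed in the table below correspond to enabling deformable convolution in ResNet stages conv3 through conv5. The sketch uses the usual mmdet backbone options (`dcn`, `stage_with_dcn`) and is written from memory, not copied from the configs in this patch.

```python
# Sketch: deformable convolutions in ResNet stages c3-c5.
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))  # c2 off, c3-c5 on
```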
+ + + + +## Citation @@ -43,6 +57,8 @@ | R-50-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 6.0 | 10.0 | 44.4 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202_010309.log.json) | | R-101-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 8.0 | 8.6 | 45.8 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204_134006.log.json) | | X-101-32x4d-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 9.2 | | 47.3 | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-20200606_183737.log.json) | +| R-50-FPN (FP16) | Mask | pytorch | dconv(c3-c5) | - | 1x | 3.0 | | 41.9 | 37.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247.log.json) | +| R-50-FPN (FP16) | Mask | pytorch | mdconv(c3-c5)| - | 1x | 3.1 | | 42.0 | 37.6 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434.log.json) | **Notes:** diff --git a/configs/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py b/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py similarity index 100% rename from configs/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py rename to configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py diff --git a/configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py b/configs/dcn/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py similarity index 100% rename from configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py rename to configs/dcn/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py diff --git a/configs/dcn/metafile.yml 
b/configs/dcn/metafile.yml index 4fcbce6..7919b84 100644 --- a/configs/dcn/metafile.yml +++ b/configs/dcn/metafile.yml @@ -10,7 +10,7 @@ Collections: - Deformable Convolution Paper: URL: https://arxiv.org/abs/1811.11168 - Title: 'Deformable ConvNets v2: More Deformable, Better Results' + Title: "Deformable ConvNets v2: More Deformable, Better Results" README: configs/dcn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 @@ -178,7 +178,7 @@ Models: - Task: Instance Segmentation Dataset: COCO Metrics: - mask AP: 37.4 + mask AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth - Name: mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco @@ -202,9 +202,51 @@ Models: - Task: Instance Segmentation Dataset: COCO Metrics: - mask AP: 37.1 + mask AP: 37.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth + - Name: mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py + Metadata: + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + Training Memory (GB): 3.0 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.9 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth + + - Name: mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco + In Collection: Deformable Convolutional Networks + Config: configs/dcn/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py + Metadata: + Training Memory (GB): 3.1 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth + - Name: mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco In Collection: Deformable Convolutional Networks Config: configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py @@ -226,7 +268,7 @@ Models: - Task: Instance Segmentation Dataset: COCO Metrics: - mask AP: 38.9 + mask AP: 38.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth - Name: cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco @@ -290,7 +332,7 @@ Models: - Task: Instance Segmentation Dataset: COCO Metrics: - mask AP: 38.6 + mask AP: 38.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth - Name: cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco @@ -314,7 +356,7 @@ Models: - Task: Instance Segmentation Dataset: COCO Metrics: - mask AP: 39.7 + mask AP: 39.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth - Name: 
cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco @@ -331,5 +373,5 @@ Models: - Task: Instance Segmentation Dataset: COCO Metrics: - mask AP: 41.1 + mask AP: 41.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth diff --git a/configs/deepfashion/README.md b/configs/deepfashion/README.md index 68e57e4..e2c042f 100644 --- a/configs/deepfashion/README.md +++ b/configs/deepfashion/README.md @@ -1,4 +1,20 @@ -# DeepFashion +# DeepFashion: Powering Robust Clothes Recognition and Retrieval With Rich Annotations + +## Abstract + + + +Recent advances in clothes recognition have been driven by the construction of clothes datasets. Existing datasets are limited in the amount of annotations and are difficult to cope with the various challenges in real-world applications. In this work, we introduce DeepFashion, a large-scale clothes dataset with comprehensive annotations. It contains over 800,000 images, which are richly annotated with massive attributes, clothing landmarks, and correspondence of images taken under different scenarios including store, street snapshot, and consumer. Such rich annotations enable the development of powerful algorithms in clothes recognition and facilitating future researches. To demonstrate the advantages of DeepFashion, we propose a new deep model, namely FashionNet, which learns clothing features by jointly predicting clothing attributes and landmarks. The estimated landmarks are then employed to pool or gate the learned features. It is optimized in an iterative manner. Extensive experiments demonstrate the effectiveness of FashionNet and the usefulness of DeepFashion. + + +
+ + + + +## Introduction @@ -39,6 +55,8 @@ mmdetection After that you can train the Mask RCNN r50 on DeepFashion-In-shop dataset by launching training with the `mask_rcnn_r50_fpn_1x.py` config or creating your own config file. +## Citation + ``` @inproceedings{liuLQWTcvpr16DeepFashion, author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, @@ -53,4 +71,4 @@ or creating your own config file. | Backbone | Model type | Dataset | bbox detection Average Precision | segmentation Average Precision | Config | Download (Google) | | :---------: | :----------: | :-----------------: | :--------------------------------: | :----------------------------: | :---------:| :-------------------------: | -| ResNet50 | Mask RCNN | DeepFashion-In-shop | 0.599 | 0.584 |[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py)| [model](https://drive.google.com/open?id=1q6zF7J6Gb-FFgM87oIORIt6uBozaXp5r) | [log](https://drive.google.com/file/d/1qTK4Dr4FFLa9fkdI6UVko408gkrfTRLP/view?usp=sharing) | +| ResNet50 | Mask RCNN | DeepFashion-In-shop | 0.599 | 0.584 |[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py)| [model](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/mask_rcnn_r50_fpn_15e_deepfashion_20200329_192752.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/20200329_192752.log.json) | diff --git a/configs/deformable_detr/README.md b/configs/deformable_detr/README.md index fe68002..e3b8e41 100644 --- a/configs/deformable_detr/README.md +++ b/configs/deformable_detr/README.md @@ -1,6 +1,20 @@ -# Deformable DETR +# Deformable DETR: Deformable Transformers for End-to-End Object Detection -## Introduction +## Abstract + + + +DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10 times less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach. + + +
+ + + + +## Citation diff --git a/configs/detectors/README.md b/configs/detectors/README.md index 37c151f..c90302b 100644 --- a/configs/detectors/README.md +++ b/configs/detectors/README.md @@ -1,6 +1,20 @@ -# DetectoRS +# DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution -## Introduction +## Abstract + + + +Many modern object detectors demonstrate outstanding performances by using the mechanism of looking and thinking twice. In this paper, we explore this mechanism in the backbone design for object detection. At the macro level, we propose Recursive Feature Pyramid, which incorporates extra feedback connections from Feature Pyramid Networks into the bottom-up backbone layers. At the micro level, we propose Switchable Atrous Convolution, which convolves the features with different atrous rates and gathers the results using switch functions. Combining them results in DetectoRS, which significantly improves the performances of object detection. On COCO test-dev, DetectoRS achieves state-of-the-art 55.7% box AP for object detection, 48.5% mask AP for instance segmentation, and 50.0% PQ for panoptic segmentation. + + +
+ + + + +## Citation diff --git a/configs/detr/README.md b/configs/detr/README.md index 617a8fb..a04e7ab 100644 --- a/configs/detr/README.md +++ b/configs/detr/README.md @@ -1,6 +1,20 @@ -# DETR +# End-to-End Object Detection with Transformers -## Introduction +## Abstract + + + +We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines. + + +
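A toy sketch of the bipartite matching behind DETR's set-based loss: build a cost matrix between predicted queries and ground-truth boxes and solve it with the Hungarian algorithm. The cost used here (negative class probability plus an L1 box distance) is a simplification of the full matching cost in the paper.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def match(pred_probs, pred_boxes, gt_labels, gt_boxes):
    """pred_probs: (Q, C), pred_boxes: (Q, 4), gt_labels: (G,), gt_boxes: (G, 4)."""
    cls_cost = -pred_probs[:, gt_labels]                          # (Q, G)
    box_cost = np.abs(pred_boxes[:, None] - gt_boxes[None]).sum(-1)
    q_idx, g_idx = linear_sum_assignment(cls_cost + box_cost)
    return q_idx, g_idx  # matched (query, ground-truth) index pairs

q, g = match(np.random.rand(100, 80), np.random.rand(100, 4),
             np.array([3, 17]), np.random.rand(2, 4))
```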
+ + + + +## Citation diff --git a/configs/double_heads/README.md b/configs/double_heads/README.md index 872211d..8191d7a 100644 --- a/configs/double_heads/README.md +++ b/configs/double_heads/README.md @@ -1,6 +1,20 @@ # Rethinking Classification and Localization for Object Detection -## Introduction +## Abstract + + + +Two head structures (i.e. fully connected head and convolution head) have been widely used in R-CNN based detectors for classification and localization tasks. However, there is a lack of understanding of how does these two head structures work for these two tasks. To address this issue, we perform a thorough analysis and find an interesting fact that the two head structures have opposite preferences towards the two tasks. Specifically, the fully connected head (fc-head) is more suitable for the classification task, while the convolution head (conv-head) is more suitable for the localization task. Furthermore, we examine the output feature maps of both heads and find that fc-head has more spatial sensitivity than conv-head. Thus, fc-head has more capability to distinguish a complete object from part of an object, but is not robust to regress the whole object. Based upon these findings, we propose a Double-Head method, which has a fully connected head focusing on classification and a convolution head for bounding box regression. Without bells and whistles, our method gains +3.5 and +2.8 AP on MS COCO dataset from Feature Pyramid Network (FPN) baselines with ResNet-50 and ResNet-101 backbones, respectively. + + +
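A conceptual PyTorch sketch of the double-head idea: RoI features are routed through a fully connected branch for classification and a convolutional branch for box regression. Layer sizes are arbitrary, and this is not the mmdet `DoubleHeadRoIHead` implementation.

```python
import torch
import torch.nn as nn

class DoubleHead(nn.Module):
    def __init__(self, in_ch=256, roi_size=7, num_classes=80):
        super().__init__()
        self.fc_cls = nn.Sequential(                # fc-head -> classification
            nn.Flatten(), nn.Linear(in_ch * roi_size ** 2, 1024), nn.ReLU(),
            nn.Linear(1024, num_classes + 1))
        self.conv_reg = nn.Sequential(              # conv-head -> box regression
            nn.Conv2d(in_ch, in_ch, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(in_ch, 4))

    def forward(self, roi_feats):                   # (num_rois, C, 7, 7)
        return self.fc_cls(roi_feats), self.conv_reg(roi_feats)

cls_scores, box_deltas = DoubleHead()(torch.rand(8, 256, 7, 7))
```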
+ + + + +## Citation diff --git a/configs/dynamic_rcnn/README.md b/configs/dynamic_rcnn/README.md index 2b087c3..d79b181 100644 --- a/configs/dynamic_rcnn/README.md +++ b/configs/dynamic_rcnn/README.md @@ -1,6 +1,20 @@ # Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training -## Introduction +## Abstract + + + +Although two-stage object detectors have continuously advanced the state-of-the-art performance in recent years, the training process itself is far from crystal. In this work, we first point out the inconsistency problem between the fixed network settings and the dynamic training procedure, which greatly affects the performance. For example, the fixed label assignment strategy and regression loss function cannot fit the distribution change of proposals and thus are harmful to training high quality detectors. Consequently, we propose Dynamic R-CNN to adjust the label assignment criteria (IoU threshold) and the shape of regression loss function (parameters of SmoothL1 Loss) automatically based on the statistics of proposals during training. This dynamic design makes better use of the training samples and pushes the detector to fit more high quality samples. Specifically, our method improves upon ResNet-50-FPN baseline with 1.9% AP and 5.5% AP90 on the MS COCO dataset with no extra overhead. + + +
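A simplified sketch of the dynamic adjustment described above: the assignment IoU threshold follows the running statistics of proposal IoUs, and the SmoothL1 beta follows the regression-error statistics. The percentile choices below are illustrative stand-ins, not the exact schedule used in the paper or config.

```python
import numpy as np

def update_dynamic_hyperparams(proposal_ious, reg_errors, iou_pct=75, err_pct=10):
    new_iou_thr = np.percentile(proposal_ious, iou_pct)    # raise thr as proposals improve
    new_beta = np.percentile(np.abs(reg_errors), err_pct)  # shrink beta as errors shrink
    return float(new_iou_thr), float(new_beta)

iou_thr, beta = update_dynamic_hyperparams(np.random.rand(512),
                                            np.random.randn(512) * 0.1)
```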
+ + + + +## Citation diff --git a/configs/empirical_attention/README.md b/configs/empirical_attention/README.md index f8647bd..8937be2 100644 --- a/configs/empirical_attention/README.md +++ b/configs/empirical_attention/README.md @@ -1,6 +1,20 @@ # An Empirical Study of Spatial Attention Mechanisms in Deep Networks -## Introduction +## Abstract + + + +Attention mechanisms have become a popular component in deep neural networks, yet there has been little examination of how different influencing factors and methods for computing attention from these factors affect performance. Toward a better general understanding of attention mechanisms, we present an empirical study that ablates various spatial attention elements within a generalized attention formulation, encompassing the dominant Transformer attention as well as the prevalent deformable convolution and dynamic convolution modules. Conducted on a variety of applications, the study yields significant findings about spatial attention in deep networks, some of which run counter to conventional understanding. For example, we find that the query and key content comparison in Transformer attention is negligible for self-attention, but vital for encoder-decoder attention. A proper combination of deformable convolution with key content only saliency achieves the best accuracy-efficiency tradeoff in self-attention. Our results suggest that there exists much room for improvement in the design of attention mechanisms. + + +
+ + + + +## Citation diff --git a/configs/fast_rcnn/README.md b/configs/fast_rcnn/README.md index 3be862a..0e98c13 100644 --- a/configs/fast_rcnn/README.md +++ b/configs/fast_rcnn/README.md @@ -1,6 +1,20 @@ # Fast R-CNN -## Introduction +## Abstract + + + +This paper proposes a Fast Region-based Convolutional Network method (Fast R-CNN) for object detection. Fast R-CNN builds on previous work to efficiently classify object proposals using deep convolutional networks. Compared to previous work, Fast R-CNN employs several innovations to improve training and testing speed while also increasing detection accuracy. Fast R-CNN trains the very deep VGG16 network 9x faster than R-CNN, is 213x faster at test-time, and achieves a higher mAP on PASCAL VOC 2012. Compared to SPPnet, Fast R-CNN trains VGG16 3x faster, tests 10x faster, and is more accurate. + + +
+ + + + +## Citation diff --git a/configs/faster_rcnn/README.md b/configs/faster_rcnn/README.md index 772a6c4..f96256e 100644 --- a/configs/faster_rcnn/README.md +++ b/configs/faster_rcnn/README.md @@ -1,7 +1,20 @@ # Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks -## Introduction +## Abstract + + +State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. + + +
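The RPN described above appears in mmdet configs as a small head on top of the shared FPN features that predicts objectness and box deltas for a set of anchors at every position. The values below follow the common `faster_rcnn_r50_fpn` defaults and are reproduced from memory as a sketch, not taken from this patch.

```python
# Sketch of a Region Proposal Network head in mmdet config form.
rpn_head = dict(
    type='RPNHead',
    in_channels=256,
    feat_channels=256,
    anchor_generator=dict(
        type='AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[4, 8, 16, 32, 64]),   # one stride per FPN level
    bbox_coder=dict(
        type='DeltaXYWHBBoxCoder',
        target_means=[0.0, 0.0, 0.0, 0.0],
        target_stds=[1.0, 1.0, 1.0, 1.0]),
    loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
    loss_bbox=dict(type='L1Loss', loss_weight=1.0))
```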
+ + + + +## Citation ```latex @@ -22,6 +35,7 @@ | R-50-DC5 | caffe | 1x | - | - | 37.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909.log.json) | | R-50-FPN | caffe | 1x | 3.8 | | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_20200504_180032.log.json) | | R-50-FPN | pytorch | 1x | 4.0 | 21.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 3.4 | 28.8 | 37.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) | | R-50-FPN | pytorch | 2x | - | - | 38.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_20200504_210434.log.json) | | R-101-FPN | caffe | 1x | 5.7 | | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_20200504_180057.log.json) | | R-101-FPN | pytorch | 1x | 6.0 | 15.6 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130_204655.log.json) | diff --git a/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py 
b/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py new file mode 100644 index 0000000..b5aea6a --- /dev/null +++ b/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py @@ -0,0 +1,15 @@ +_base_ = 'faster_rcnn_r50_caffe_fpn_1x_coco.py' + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[60000, 80000]) + +# Runner type +runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) + +checkpoint_config = dict(interval=10000) +evaluation = dict(interval=10000, metric='bbox') diff --git a/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py b/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py new file mode 100644 index 0000000..acd4040 --- /dev/null +++ b/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py @@ -0,0 +1,3 @@ +_base_ = './faster_rcnn_r50_fpn_1x_coco.py' +# fp16 settings +fp16 = dict(loss_scale=512.) diff --git a/configs/faster_rcnn/metafile.yml b/configs/faster_rcnn/metafile.yml index 97faf53..5178a3b 100644 --- a/configs/faster_rcnn/metafile.yml +++ b/configs/faster_rcnn/metafile.yml @@ -13,7 +13,7 @@ Collections: - RoIPool Paper: URL: https://arxiv.org/abs/1506.01497 - Title: 'Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks' + Title: "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" README: configs/faster_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/faster_rcnn.py#L6 @@ -65,18 +65,42 @@ Models: box AP: 37.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth + - Name: faster_rcnn_r50_fpn_fp16_1x_coco + In Collection: Faster R-CNN + Config: configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py + Metadata: + Training Memory (GB): 3.4 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 34.72 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth + - Name: faster_rcnn_r50_fpn_2x_coco In Collection: Faster R-CNN Config: configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py Metadata: Training Memory (GB): 4.0 inference time (ms/im): - - value: 46.73 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) + - value: 46.73 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection @@ -104,12 +128,12 @@ Models: Metadata: Training Memory (GB): 6.0 inference time (ms/im): - - value: 64.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) + - value: 64.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection @@ -124,12 +148,12 @@ Models: Metadata: Training Memory (GB): 6.0 inference time (ms/im): - - value: 64.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) + - value: 64.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection @@ -144,12 +168,12 @@ Models: Metadata: Training 
Memory (GB): 7.2 inference time (ms/im): - - value: 72.46 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection @@ -164,12 +188,12 @@ Models: Metadata: Training Memory (GB): 7.2 inference time (ms/im): - - value: 72.46 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) + - value: 72.46 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection @@ -184,12 +208,12 @@ Models: Metadata: Training Memory (GB): 10.3 inference time (ms/im): - - value: 106.38 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) + - value: 106.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) Epochs: 12 Results: - Task: Object Detection @@ -204,12 +228,12 @@ Models: Metadata: Training Memory (GB): 10.3 inference time (ms/im): - - value: 106.38 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) + - value: 106.38 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (800, 1333) Epochs: 24 Results: - Task: Object Detection diff --git a/configs/fcos/README.md b/configs/fcos/README.md index ae5470a..fc5ca54 100644 --- a/configs/fcos/README.md +++ b/configs/fcos/README.md @@ -1,6 +1,20 @@ # FCOS: Fully Convolutional One-Stage Object Detection -## Introduction +## Abstract + + + +We propose a fully convolutional one-stage object detector (FCOS) to solve object detection in a per-pixel prediction fashion, analogue to semantic segmentation. Almost all state-of-the-art object detectors such as RetinaNet, SSD, YOLOv3, and Faster R-CNN rely on pre-defined anchor boxes. In contrast, our proposed detector FCOS is anchor box free, as well as proposal free. By eliminating the predefined set of anchor boxes, FCOS completely avoids the complicated computation related to anchor boxes such as calculating overlapping during training. More importantly, we also avoid all hyper-parameters related to anchor boxes, which are often very sensitive to the final detection performance. With the only post-processing non-maximum suppression (NMS), FCOS with ResNeXt-64x4d-101 achieves 44.7% in AP with single-model and single-scale testing, surpassing previous one-stage detectors with the advantage of being much simpler. For the first time, we demonstrate a much simpler and flexible detection framework achieving improved detection accuracy. We hope that the proposed FCOS framework can serve as a simple and strong alternative for many other instance-level tasks. + + +
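To make the per-pixel formulation above concrete, the sketch below computes FCOS-style regression targets (distances to the four box sides) and the centerness score for locations inside a ground-truth box. It is a standalone NumPy illustration of the definitions in the paper, not code from this repository.

```python
# Standalone illustration of FCOS regression targets and centerness for one GT box.
import numpy as np

def fcos_targets(points, box):
    """points: (N, 2) array of (x, y) locations; box: (x1, y1, x2, y2)."""
    x, y = points[:, 0], points[:, 1]
    x1, y1, x2, y2 = box
    # Distances from each location to the four sides of the box (l, t, r, b).
    l, t = x - x1, y - y1
    r, b = x2 - x, y2 - y
    reg = np.stack([l, t, r, b], axis=1)
    inside = reg.min(axis=1) > 0          # only locations inside the box are positives
    # Centerness down-weights predictions made far from the box centre.
    centerness = np.sqrt(
        (np.minimum(l, r) / np.maximum(l, r)) *
        (np.minimum(t, b) / np.maximum(t, b)))
    return reg, centerness, inside

pts = np.array([[50., 50.], [10., 90.], [200., 200.]])
reg, ctr, inside = fcos_targets(pts, box=(0., 0., 100., 100.))
print(reg, ctr.round(3), inside)  # centre point gets centerness 1.0; the last point is outside
```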
+ + + + +## Citation @@ -17,16 +31,16 @@ | Backbone | Style | GN | MS train | Tricks | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | |:---------:|:-------:|:-------:|:--------:|:-------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:| -| R-50 | caffe | Y | N | N | N | 1x | 3.6 | 22.7 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/20201227_180009.log.json) | -| R-50 | caffe | Y | N | Y | N | 1x | 3.7 | - | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/20210105_135818.log.json)| -| R-50 | caffe | Y | N | Y | Y | 1x | 3.8 | - | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/20210105_224556.log.json)| -| R-101 | caffe | Y | N | N | N | 1x | 5.5 | 17.3 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/20210103_155046.log.json) | +| R-50 | caffe | Y | N | N | N | 1x | 3.6 | 22.7 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/20201227_180009.log.json) | +| R-50 | caffe | Y | N | Y | N | 1x | 3.7 | - | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/20210105_135818.log.json)| +| R-50 | caffe | Y | N | Y | Y | 1x | 3.8 | - | 42.3 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/20210105_224556.log.json)| +| R-101 | caffe | Y | N | N | N | 1x | 5.5 | 17.3 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/20210103_155046.log.json) | | Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | |:---------:|:-------:|:-------:|:--------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:| -| R-50 | caffe | Y | Y | 2x | 2.6 | 22.9 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20201227_161900.log.json) | -| R-101 | caffe | Y | Y | 2x | 5.5 | 17.3 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20210103_155046.log.json) | -| X-101 | pytorch | Y | Y | 2x | 10.0 | 9.7 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/20210114_133041.log.json) | +| R-50 | caffe | Y | Y | 2x | 2.6 | 22.9 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20201227_161900.log.json) | +| R-101 | caffe | Y | Y | 2x | 5.5 | 17.3 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20210103_155046.log.json) | +| X-101 | pytorch | Y | Y | 2x | 10.0 | 9.7 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/20210114_133041.log.json) | **Notes:** diff --git a/configs/fcos/metafile.yml b/configs/fcos/metafile.yml index c0d6f93..ae922eb 100644 --- a/configs/fcos/metafile.yml +++ b/configs/fcos/metafile.yml @@ -37,7 +37,7 @@ Models: Dataset: COCO Metrics: box AP: 36.6 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco In Collection: FCOS @@ -50,7 +50,7 @@ Models: Dataset: COCO Metrics: box AP: 38.7 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco In Collection: FCOS @@ -63,7 +63,7 @@ Models: Dataset: COCO Metrics: box AP: 42.3 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth - Name: fcos_r101_caffe_fpn_gn-head_1x_coco In Collection: FCOS @@ -83,7 +83,7 @@ Models: Dataset: COCO Metrics: box AP: 39.1 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth - Name: fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco In Collection: FCOS @@ -103,7 +103,7 @@ Models: Dataset: COCO Metrics: box AP: 38.5 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth - 
Name: fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco In Collection: FCOS @@ -123,7 +123,7 @@ Models: Dataset: COCO Metrics: box AP: 40.8 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth - Name: fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco In Collection: FCOS @@ -143,4 +143,4 @@ Models: Dataset: COCO Metrics: box AP: 42.6 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth diff --git a/configs/foveabox/README.md b/configs/foveabox/README.md index b9b5684..7009e97 100644 --- a/configs/foveabox/README.md +++ b/configs/foveabox/README.md @@ -1,7 +1,23 @@ # FoveaBox: Beyond Anchor-based Object Detector +## Abstract + + + +We present FoveaBox, an accurate, flexible, and completely anchor-free framework for object detection. While almost all state-of-the-art object detectors utilize predefined anchors to enumerate possible locations, scales and aspect ratios for the search of the objects, their performance and generalization ability are also limited to the design of anchors. Instead, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. The scales of target boxes are naturally associated with feature pyramid representations. In FoveaBox, an instance is assigned to adjacent feature levels to make the model more accurate.We demonstrate its effectiveness on standard benchmarks and report extensive experimental analysis. Without bells and whistles, FoveaBox achieves state-of-the-art single model performance on the standard COCO and Pascal VOC object detection benchmark. More importantly, FoveaBox avoids all computation and hyper-parameters related to anchor boxes, which are often sensitive to the final detection performance. We believe the simple and effective approach will serve as a solid baseline and help ease future research for object detection. + + +
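The sketch below illustrates the kind of scale-to-pyramid-level association the abstract refers to: each ground-truth box is mapped to a feature level (and its neighbours) according to its area. The base scales, thresholds, and "adjacent levels" rule here are illustrative placeholders, not FoveaBox's exact assignment.

```python
# Toy illustration of assigning instances to FPN levels by object scale
# (thresholds are illustrative, not FoveaBox's exact parameters).
import math

# Nominal object scale handled by pyramid levels P3..P7.
BASE_SCALES = {3: 32, 4: 64, 5: 128, 6: 256, 7: 512}

def assign_levels(box, spread=1):
    """box: (x1, y1, x2, y2). Returns the pyramid levels the instance is assigned to."""
    w, h = box[2] - box[0], box[3] - box[1]
    scale = math.sqrt(w * h)
    # Pick the level whose nominal scale is closest (in log2 space) to the object scale.
    best = min(BASE_SCALES, key=lambda lv: abs(math.log2(scale) - math.log2(BASE_SCALES[lv])))
    # Also train the adjacent levels so the assignment is less brittle near level boundaries.
    return [lv for lv in BASE_SCALES if abs(lv - best) <= spread]

print(assign_levels((0, 0, 100, 60)))   # mid-sized object -> P3-P5, centred on P4
print(assign_levels((0, 0, 600, 400)))  # large object -> P6/P7
```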
+ + + + +## Introduction + FoveaBox is an accurate, flexible and completely anchor-free object detection system for object detection framework, as presented in our paper [https://arxiv.org/abs/1904.03797](https://arxiv.org/abs/1904.03797): Different from previous anchor-based methods, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. diff --git a/configs/fp16/README.md b/configs/fp16/README.md deleted file mode 100644 index 171aa1b..0000000 --- a/configs/fp16/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Mixed Precision Training - -## Introduction - - - -```latex -@article{micikevicius2017mixed, - title={Mixed precision training}, - author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others}, - journal={arXiv preprint arXiv:1710.03740}, - year={2017} -} -``` - -## Results and Models - -| Architecture | Backbone | Style | Conv | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -|:------------:|:---------:|:-------:|:------------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:| -| Faster R-CNN | R-50 | pytorch | - | 1x | 3.4 | 28.8 | 37.5 | - |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) | -| Mask R-CNN | R-50 | pytorch | - | 1x | 3.6 | 24.1 | 38.1 | 34.7 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) | -| Mask R-CNN | R-50 | pytorch | dconv(c3-c5) | 1x | 3.0 | | 41.9 | 37.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247.log.json) | -| Mask R-CNN | R-50 | pytorch | mdconv(c3-c5)| 1x | 3.1 | | 42.0 | 37.6 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434.log.json) | 
-| Retinanet | R-50 | pytorch | - | 1x | 2.8 | 31.6 | 36.4 | |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) | diff --git a/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py b/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index 78fa5b6..0000000 --- a/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py b/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index f506ea8..0000000 --- a/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/configs/fp16/metafile.yml b/configs/fp16/metafile.yml deleted file mode 100644 index 70d6649..0000000 --- a/configs/fp16/metafile.yml +++ /dev/null @@ -1,113 +0,0 @@ -Collections: - - Name: FP16 - Metadata: - Training Data: COCO - Training Techniques: - - Mixed Precision Training - Training Resources: 8x V100 GPUs - Paper: - URL: https://arxiv.org/abs/1710.03740 - Title: 'Mixed Precision Training' - README: configs/fp16/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/core/fp16/hooks.py#L11 - Version: v2.0.0 - -Models: - - Name: faster_rcnn_r50_fpn_fp16_1x_coco - In Collection: FP16 - Config: configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py - Metadata: - Training Memory (GB): 3.4 - inference time (ms/im): - - value: 34.72 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP16 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth - - - Name: mask_rcnn_r50_fpn_fp16_1x_coco - In Collection: FP16 - Config: configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py - Metadata: - Training Memory (GB): 3.6 - inference time (ms/im): - - value: 41.49 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP16 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth - - - Name: mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco - In Collection: FP16 - Config: configs/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 3.0 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth - - - Name: mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco - In Collection: FP16 - Config: 
configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 3.1 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth - - - Name: retinanet_r50_fpn_fp16_1x_coco - In Collection: FP16 - Config: configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py - Metadata: - Training Memory (GB): 2.8 - inference time (ms/im): - - value: 31.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP16 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth diff --git a/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py b/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index 519c4db..0000000 --- a/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/configs/fpg/README.md b/configs/fpg/README.md index c0545da..dd69c61 100644 --- a/configs/fpg/README.md +++ b/configs/fpg/README.md @@ -1,7 +1,21 @@ # Feature Pyramid Grids -## Introduction +## Abstract + + + +Feature pyramid networks have been widely adopted in the object detection literature to improve feature representations for better handling of variations in scale. In this paper, we present Feature Pyramid Grids (FPG), a deep multi-pathway feature pyramid, that represents the feature scale-space as a regular grid of parallel bottom-up pathways which are fused by multi-directional lateral connections. FPG can improve single-pathway feature pyramid networks by significantly increasing its performance at similar computation cost, highlighting importance of deep pyramid representations. In addition to its general and uniform structure, over complicated structures that have been found with neural architecture search, it also compares favorably against such approaches without relying on search. We hope that FPG with its uniform and effective nature can serve as a strong component for future work in object recognition. + + +
+ + + + +## Citation ```latex @article{chen2020feature, diff --git a/configs/free_anchor/README.md b/configs/free_anchor/README.md index 5014bc0..a1dcb17 100644 --- a/configs/free_anchor/README.md +++ b/configs/free_anchor/README.md @@ -1,6 +1,20 @@ # FreeAnchor: Learning to Match Anchors for Visual Object Detection -## Introduction +## Abstract + + + +Modern CNN-based object detectors assign anchors for ground-truth objects under the restriction of object-anchor Intersection-over-Unit (IoU). In this study, we propose a learning-to-match approach to break IoU restriction, allowing objects to match anchors in a flexible manner. Our approach, referred to as FreeAnchor, updates hand-crafted anchor assignment to "free" anchor matching by formulating detector training as a maximum likelihood estimation (MLE) procedure. FreeAnchor targets at learning features which best explain a class of objects in terms of both classification and localization. FreeAnchor is implemented by optimizing detection customized likelihood and can be fused with CNN-based detectors in a plug-and-play manner. Experiments on COCO demonstrate that FreeAnchor consistently outperforms their counterparts with significant margins. + + +
+ + + + +## Citation diff --git a/configs/fsaf/README.md b/configs/fsaf/README.md index 4d64392..ba4d409 100644 --- a/configs/fsaf/README.md +++ b/configs/fsaf/README.md @@ -1,5 +1,21 @@ # Feature Selective Anchor-Free Module for Single-Shot Object Detection +## Abstract + + + +We motivate and present feature selective anchor-free (FSAF) module, a simple and effective building block for single-shot object detectors. It can be plugged into single-shot detectors with feature pyramid structure. The FSAF module addresses two limitations brought up by the conventional anchor-based detection: 1) heuristic-guided feature selection; 2) overlap-based anchor sampling. The general concept of the FSAF module is online feature selection applied to the training of multi-level anchor-free branches. Specifically, an anchor-free branch is attached to each level of the feature pyramid, allowing box encoding and decoding in the anchor-free manner at an arbitrary level. During training, we dynamically assign each instance to the most suitable feature level. At the time of inference, the FSAF module can work jointly with anchor-based branches by outputting predictions in parallel. We instantiate this concept with simple implementations of anchor-free branches and online feature selection strategy. Experimental results on the COCO detection track show that our FSAF module performs better than anchor-based counterparts while being faster. When working jointly with anchor-based branches, the FSAF module robustly improves the baseline RetinaNet by a large margin under various settings, while introducing nearly free inference overhead. And the resulting best model can achieve a state-of-the-art 44.6% mAP, outperforming all existing single-shot detectors on COCO. + + +
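A minimal sketch of the online feature selection idea described above: during training, each instance is routed to the anchor-free branch whose current loss is smallest. The loss values here are placeholders; FSAF's actual branches use a focal classification loss and an IoU-based regression loss.

```python
# Toy sketch of FSAF-style online feature selection: pick, per instance,
# the pyramid level whose anchor-free branch currently yields the lowest loss.
import torch

def select_level(per_level_cls_loss, per_level_reg_loss):
    """Both args: list of scalar tensors, one per pyramid level, for a single instance."""
    totals = torch.stack([c + r for c, r in zip(per_level_cls_loss, per_level_reg_loss)])
    return int(totals.argmin())  # the instance is assigned to this level for this step

# Example: pretend losses computed on levels P3..P7 for one instance.
cls = [torch.tensor(v) for v in (1.2, 0.9, 0.7, 1.1, 1.5)]
reg = [torch.tensor(v) for v in (0.8, 0.6, 0.9, 1.0, 1.3)]
print('selected pyramid level index:', select_level(cls, reg))  # -> 1 (i.e. P4 here)
```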
+ + + + +## Introduction + FSAF is an anchor-free method published in CVPR2019 ([https://arxiv.org/pdf/1903.00621.pdf](https://arxiv.org/pdf/1903.00621.pdf)). diff --git a/configs/gcnet/README.md b/configs/gcnet/README.md index 0ee6a94..c5c436a 100644 --- a/configs/gcnet/README.md +++ b/configs/gcnet/README.md @@ -1,17 +1,31 @@ -# GCNet for Object Detection +# GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond + +## Abstract + + + +The Non-Local Network (NLNet) presents a pioneering approach for capturing long-range dependencies, via aggregating query-specific global context to each query position. However, through a rigorous empirical analysis, we have found that the global contexts modeled by non-local network are almost the same for different query positions within an image. In this paper, we take advantage of this finding to create a simplified network based on a query-independent formulation, which maintains the accuracy of NLNet but with significantly less computation. We further observe that this simplified design shares similar structure with Squeeze-Excitation Network (SENet). Hence we unify them into a three-step general framework for global context modeling. Within the general framework, we design a better instantiation, called the global context (GC) block, which is lightweight and can effectively model the global context. The lightweight property allows us to apply it for multiple layers in a backbone network to construct a global context network (GCNet), which generally outperforms both simplified NLNet and SENet on major benchmarks for various recognition tasks. + + +
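A compact PyTorch sketch of the global context (GC) block described above: a query-independent context vector is pooled with a softmax attention map, passed through a bottleneck transform, and added back to every position. Channel sizes and the bottleneck ratio are illustrative; the configs themselves rely on the maintained implementation shipped with mmcv.

```python
# Minimal GC block sketch: query-independent global context + bottleneck transform.
import torch
import torch.nn as nn

class GCBlock(nn.Module):
    def __init__(self, channels, ratio=1 / 16.):
        super().__init__()
        hidden = max(1, int(channels * ratio))
        self.context_conv = nn.Conv2d(channels, 1, kernel_size=1)  # attention logits per position
        self.transform = nn.Sequential(                            # bottleneck channel transform
            nn.Conv2d(channels, hidden, 1),
            nn.LayerNorm([hidden, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, channels, 1))

    def forward(self, x):
        n, c, h, w = x.shape
        # Softmax over all spatial positions gives one attention map shared by every query.
        attn = self.context_conv(x).view(n, 1, h * w).softmax(dim=-1)    # (n, 1, hw)
        context = torch.bmm(x.view(n, c, h * w), attn.transpose(1, 2))   # (n, c, 1)
        context = context.view(n, c, 1, 1)
        # Broadcast-add the transformed context back to every position.
        return x + self.transform(context)

feat = torch.randn(2, 64, 32, 32)
print(GCBlock(64)(feat).shape)  # torch.Size([2, 64, 32, 32])
```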
+ + + + +## Introduction By [Yue Cao](http://yue-cao.me), [Jiarui Xu](http://jerryxu.net), [Stephen Lin](https://scholar.google.com/citations?user=c3PYmxUAAAAJ&hl=en), Fangyun Wei, [Han Hu](https://sites.google.com/site/hanhushomepage/). We provide config files to reproduce the results in the paper for ["GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond"](https://arxiv.org/abs/1904.11492) on COCO object detection. -## Introduction - **GCNet** is initially described in [arxiv](https://arxiv.org/abs/1904.11492). Via absorbing advantages of Non-Local Networks (NLNet) and Squeeze-Excitation Networks (SENet), GCNet provides a simple, fast and effective approach for global context modeling, which generally outperforms both NLNet and SENet on major benchmarks for various recognition tasks. -## Citing GCNet +## Citation ```latex @article{cao2019GCNet, diff --git a/configs/gfl/README.md b/configs/gfl/README.md index 51a6aa1..46a7a08 100644 --- a/configs/gfl/README.md +++ b/configs/gfl/README.md @@ -1,6 +1,20 @@ # Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection -## Introduction +## Abstract + + + +One-stage detector basically formulates object detection as dense classification and localization. The classification is usually optimized by Focal Loss and the box location is commonly learned under Dirac delta distribution. A recent trend for one-stage detectors is to introduce an individual prediction branch to estimate the quality of localization, where the predicted quality facilitates the classification to improve detection performance. This paper delves into the representations of the above three fundamental elements: quality estimation, classification and localization. Two problems are discovered in existing practices, including (1) the inconsistent usage of the quality estimation and classification between training and inference and (2) the inflexible Dirac delta distribution for localization when there is ambiguity and uncertainty in complex scenes. To address the problems, we design new representations for these elements. Specifically, we merge the quality estimation into the class prediction vector to form a joint representation of localization quality and classification, and use a vector to represent arbitrary distribution of box locations. The improved representations eliminate the inconsistency risk and accurately depict the flexible distribution in real data, but contain continuous labels, which is beyond the scope of Focal Loss. We then propose Generalized Focal Loss (GFL) that generalizes Focal Loss from its discrete form to the continuous version for successful optimization. On COCO test-dev, GFL achieves 45.0\% AP using ResNet-101 backbone, surpassing state-of-the-art SAPD (43.5\%) and ATSS (43.6\%) with higher or comparable inference speed, under the same backbone and training settings. Notably, our best model can achieve a single-model single-scale AP of 48.2\%, at 10 FPS on a single 2080Ti GPU. + + +
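As a concrete reference for the joint quality-classification representation, here is a simplified sketch of Quality Focal Loss: binary cross-entropy against a soft IoU-quality target, modulated by |target − prediction|^β. It is a standalone illustration of the loss from the paper, not the implementation shipped in this repository.

```python
# Simplified Quality Focal Loss sketch: BCE against a soft (IoU-quality) target,
# modulated by |quality - sigmoid(logit)|^beta as in the GFL paper.
import torch
import torch.nn.functional as F

def quality_focal_loss(logits, quality_targets, beta=2.0):
    """logits: (N,) raw scores; quality_targets: (N,) in [0, 1] (0 for negatives)."""
    prob = logits.sigmoid()
    scale = (quality_targets - prob).abs().pow(beta)  # focal-style modulating factor
    bce = F.binary_cross_entropy_with_logits(logits, quality_targets, reduction='none')
    return (scale * bce).mean()

logits = torch.tensor([2.0, -1.0, 0.5])
targets = torch.tensor([0.9, 0.0, 0.6])  # predicted-box IoU for positives, 0 for negatives
print(quality_focal_loss(logits, targets))
```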
+ + + + +## Citation diff --git a/configs/ghm/README.md b/configs/ghm/README.md index 2a51ed8..97b6090 100644 --- a/configs/ghm/README.md +++ b/configs/ghm/README.md @@ -1,6 +1,20 @@ # Gradient Harmonized Single-stage Detector -## Introduction +## Abstract + + + +Despite the great success of two-stage detectors, single-stage detector is still a more elegant and efficient way, yet suffers from the two well-known disharmonies during training, i.e. the huge difference in quantity between positive and negative examples as well as between easy and hard examples. In this work, we first point out that the essential effect of the two disharmonies can be summarized in term of the gradient. Further, we propose a novel gradient harmonizing mechanism (GHM) to be a hedging for the disharmonies. The philosophy behind GHM can be easily embedded into both classification loss function like cross-entropy (CE) and regression loss function like smooth-L1 (SL1) loss. To this end, two novel loss functions called GHM-C and GHM-R are designed to balancing the gradient flow for anchor classification and bounding box refinement, respectively. Ablation study on MS COCO demonstrates that without laborious hyper-parameter tuning, both GHM-C and GHM-R can bring substantial improvement for single-stage detector. Without any whistles and bells, our model achieves 41.6 mAP on COCO test-dev set which surpasses the state-of-the-art method, Focal Loss (FL) + SL1, by 0.8. + + +
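A simplified sketch of the GHM-C idea described above: bin examples by their gradient norm |p − y| and down-weight examples that fall into densely populated bins. The real implementation additionally keeps a momentum-smoothed running estimate of the bin counts, which is omitted here.

```python
# Simplified GHM-C sketch: weight each example by the inverse of its gradient-density bin.
import torch
import torch.nn.functional as F

def ghm_c_loss(logits, targets, bins=10):
    """logits, targets: (N,) tensors with binary targets in {0, 1}."""
    g = (logits.sigmoid() - targets).abs()   # gradient norm per example, in [0, 1]
    edges = torch.linspace(0, 1, bins + 1)
    edges[-1] += 1e-6                        # make the last bin right-inclusive
    weights = torch.zeros_like(logits)
    n = logits.numel()
    valid_bins = 0
    for i in range(bins):
        in_bin = (g >= edges[i]) & (g < edges[i + 1])
        count = in_bin.sum().item()
        if count > 0:
            weights[in_bin] = n / count      # rarer gradient norms get larger weights
            valid_bins += 1
    weights = weights / max(valid_bins, 1)
    loss = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    return (loss * weights).sum() / n

logits = torch.tensor([3.0, -2.5, 0.1, -0.2, 2.0])
targets = torch.tensor([1.0, 0.0, 1.0, 0.0, 0.0])
print(ghm_c_loss(logits, targets))
```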
+ + + + +## Citation diff --git a/configs/gn+ws/README.md b/configs/gn+ws/README.md index 8001b0d..5cde531 100644 --- a/configs/gn+ws/README.md +++ b/configs/gn+ws/README.md @@ -1,6 +1,20 @@ # Weight Standardization -## Introduction +## Abstract + + + +Batch Normalization (BN) has become an out-of-box technique to improve deep network training. However, its effectiveness is limited for micro-batch training, i.e., each GPU typically has only 1-2 images for training, which is inevitable for many computer vision tasks, e.g., object detection and semantic segmentation, constrained by memory consumption. To address this issue, we propose Weight Standardization (WS) and Batch-Channel Normalization (BCN) to bring two success factors of BN into micro-batch training: 1) the smoothing effects on the loss landscape and 2) the ability to avoid harmful elimination singularities along the training trajectory. WS standardizes the weights in convolutional layers to smooth the loss landscape by reducing the Lipschitz constants of the loss and the gradients; BCN combines batch and channel normalizations and leverages estimated statistics of the activations in convolutional layers to keep networks away from elimination singularities. We validate WS and BCN on comprehensive computer vision tasks, including image classification, object detection, instance segmentation, video recognition and semantic segmentation. All experimental results consistently show that WS and BCN improve micro-batch training significantly. Moreover, using WS and BCN with micro-batch training is even able to match or outperform the performances of BN with large-batch training. + + +
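A minimal PyTorch sketch of Weight Standardization as described above: each convolution filter's weights are standardized (zero mean, unit variance over their fan-in) before the convolution is applied. This is a generic illustration; the configs in this folder enable the bundled mmcv implementation through their `conv_cfg` setting rather than a hand-rolled layer like this one.

```python
# Weight Standardization sketch: standardize each output filter's weights before conv.
import torch
import torch.nn as nn
import torch.nn.functional as F

class WSConv2d(nn.Conv2d):
    def forward(self, x):
        w = self.weight
        # Mean/std over each filter's fan-in (in_channels * kH * kW).
        mean = w.mean(dim=(1, 2, 3), keepdim=True)
        std = w.std(dim=(1, 2, 3), keepdim=True) + 1e-5
        w = (w - mean) / std
        return F.conv2d(x, w, self.bias, self.stride, self.padding, self.dilation, self.groups)

x = torch.randn(2, 16, 32, 32)
conv = WSConv2d(16, 32, kernel_size=3, padding=1)
print(conv(x).shape)  # torch.Size([2, 32, 32, 32])
```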
+ + + + +## Citation diff --git a/configs/gn/README.md b/configs/gn/README.md index 5c6e41f..4a9d9b5 100644 --- a/configs/gn/README.md +++ b/configs/gn/README.md @@ -1,6 +1,20 @@ # Group Normalization -## Introduction +## Abstract + + + +Batch Normalization (BN) is a milestone technique in the development of deep learning, enabling various networks to train. However, normalizing along the batch dimension introduces problems --- BN's error increases rapidly when the batch size becomes smaller, caused by inaccurate batch statistics estimation. This limits BN's usage for training larger models and transferring features to computer vision tasks including detection, segmentation, and video, which require small batches constrained by memory consumption. In this paper, we present Group Normalization (GN) as a simple alternative to BN. GN divides the channels into groups and computes within each group the mean and variance for normalization. GN's computation is independent of batch sizes, and its accuracy is stable in a wide range of batch sizes. On ResNet-50 trained in ImageNet, GN has 10.6% lower error than its BN counterpart when using a batch size of 2; when using typical batch sizes, GN is comparably good with BN and outperforms other normalization variants. Moreover, GN can be naturally transferred from pre-training to fine-tuning. GN can outperform its BN-based counterparts for object detection and segmentation in COCO, and for video classification in Kinetics, showing that GN can effectively replace the powerful BN in a variety of tasks. GN can be easily implemented by a few lines of code in modern libraries. + + +
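The abstract's claim that GN "can be easily implemented by a few lines of code" can be made concrete with the sketch below, a from-scratch version equivalent in spirit to `torch.nn.GroupNorm`, which is what the configs in this folder actually use.

```python
# Group Normalization in a few lines: normalize over (channels-per-group, H, W).
import torch

def group_norm(x, num_groups, gamma, beta, eps=1e-5):
    n, c, h, w = x.shape
    x = x.view(n, num_groups, c // num_groups, h, w)
    mean = x.mean(dim=(2, 3, 4), keepdim=True)
    var = x.var(dim=(2, 3, 4), keepdim=True, unbiased=False)
    x = (x - mean) / torch.sqrt(var + eps)
    x = x.view(n, c, h, w)
    return x * gamma.view(1, c, 1, 1) + beta.view(1, c, 1, 1)

x = torch.randn(2, 32, 8, 8)
out = group_norm(x, num_groups=8, gamma=torch.ones(32), beta=torch.zeros(32))
# Matches the built-in layer up to numerical precision.
ref = torch.nn.functional.group_norm(x, 8, torch.ones(32), torch.zeros(32))
print(torch.allclose(out, ref, atol=1e-5))
```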
+ + + + +## Citation diff --git a/configs/grid_rcnn/README.md b/configs/grid_rcnn/README.md index 30cb6e0..b53cc9e 100644 --- a/configs/grid_rcnn/README.md +++ b/configs/grid_rcnn/README.md @@ -1,6 +1,22 @@ # Grid R-CNN -## Introduction +## Abstract + + + +This paper proposes a novel object detection framework named Grid R-CNN, which adopts a grid guided localization mechanism for accurate object detection. Different from the traditional regression based methods, the Grid R-CNN captures the spatial information explicitly and enjoys the position sensitive property of fully convolutional architecture. Instead of using only two independent points, we design a multi-point supervision formulation to encode more clues in order to reduce the impact of inaccurate prediction of specific points. To take the full advantage of the correlation of points in a grid, we propose a two-stage information fusion strategy to fuse feature maps of neighbor grid points. The grid guided localization approach is easy to be extended to different state-of-the-art detection frameworks. Grid R-CNN leads to high quality object localization, and experiments demonstrate that it achieves a 4.1% AP gain at IoU=0.8 and a 10.0% AP gain at IoU=0.9 on COCO benchmark compared to Faster R-CNN with Res50 backbone and FPN architecture. + +Grid R-CNN is a well-performed objection detection framework. It transforms the traditional box offset regression problem into a grid point estimation problem. With the guidance of the grid points, it can obtain high-quality localization results. However, the speed of Grid R-CNN is not so satisfactory. In this technical report we present Grid R-CNN Plus, a better and faster version of Grid R-CNN. We have made several updates that significantly speed up the framework and simultaneously improve the accuracy. On COCO dataset, the Res50-FPN based Grid R-CNN Plus detector achieves an mAP of 40.4%, outperforming the baseline on the same model by 3.0 points with similar inference time. + + +
+ + + + +## Citation diff --git a/configs/groie/README.md b/configs/groie/README.md index c38b70b..5a45be7 100644 --- a/configs/groie/README.md +++ b/configs/groie/README.md @@ -1,6 +1,22 @@ -# GRoIE +# A novel Region of Interest Extraction Layer for Instance Segmentation -## A novel Region of Interest Extraction Layer for Instance Segmentation +## Abstract + + + +Given the wide diffusion of deep neural network architectures for computer vision tasks, several new applications are nowadays more and more feasible. Among them, a particular attention has been recently given to instance segmentation, by exploiting the results achievable by two-stage networks (such as Mask R-CNN or Faster R-CNN), derived from R-CNN. In these complex architectures, a crucial role is played by the Region of Interest (RoI) extraction layer, devoted to extracting a coherent subset of features from a single Feature Pyramid Network (FPN) layer attached on top of a backbone. +This paper is motivated by the need to overcome the limitations of existing RoI extractors which select only one (the best) layer from FPN. Our intuition is that all the layers of FPN retain useful information. Therefore, the proposed layer (called Generic RoI Extractor - GRoIE) introduces non-local building blocks and attention mechanisms to boost the performance. +A comprehensive ablation study at component level is conducted to find the best set of algorithms and parameters for the GRoIE layer. Moreover, GRoIE can be integrated seamlessly with every two-stage architecture for both object detection and instance segmentation tasks. Therefore, the improvements brought about by the use of GRoIE in different state-of-the-art architectures are also evaluated. The proposed layer leads up to gain a 1.1% AP improvement on bounding box detection and 1.7% AP improvement on instance segmentation. + + +
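To ground the "use all FPN layers" intuition, here is a heavily simplified sketch that pools the same RoIs from every pyramid level and sums the results. The real GRoIE layer additionally applies per-level pre/post-processing modules with attention, which are omitted here; `torchvision.ops.roi_align` is used only as a convenient stand-in for the pooling operator.

```python
# Simplified GRoIE-style pooling sketch: extract the RoI from every FPN level and fuse (sum).
import torch
from torchvision.ops import roi_align

def groie_like_pool(fpn_feats, strides, rois, out_size=7):
    """fpn_feats: list of (N, C, Hi, Wi); strides: per-level stride;
    rois: (K, 5) rows of [batch_idx, x1, y1, x2, y2] in image coordinates."""
    pooled = 0
    for feat, stride in zip(fpn_feats, strides):
        # Pool from *every* level instead of choosing a single "best" one.
        pooled = pooled + roi_align(feat, rois, output_size=out_size,
                                    spatial_scale=1.0 / stride,
                                    sampling_ratio=2, aligned=True)
    return pooled  # (K, C, out_size, out_size); GRoIE would refine this with attention

feats = [torch.randn(1, 256, 200 // s, 200 // s) for s in (4, 8, 16, 32)]
rois = torch.tensor([[0., 10., 10., 120., 90.]])
print(groie_like_pool(feats, (4, 8, 16, 32), rois).shape)  # torch.Size([1, 256, 7, 7])
```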
+ + + + +## Introduction By Leonardo Rossi, Akbar Karimi and Andrea Prati from [IMPLab](http://implab.ce.unipr.it/). @@ -9,7 +25,6 @@ We provide configs to reproduce the results in the paper for "*A novel Region of Interest Extraction Layer for Instance Segmentation*" on COCO object detection. -## Introduction @@ -25,9 +40,6 @@ performance. ## Results and models The results on COCO 2017 minival (5k images) are shown in the below table. -You can find -[here](https://drive.google.com/drive/folders/19ssstbq_h0Z1cgxHmJYFO8s1arf3QJbT) -the trained models. ### Application of GRoIE to different architectures @@ -42,24 +54,24 @@ the trained models. | R-50-FPN | GC-Net | 1x | 40.7 | 36.5 | [config](../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | | R-50-FPN | + GRoIE | 1x | 41.0 | 37.8 | [config](./mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | | R-101-FPN | GC-Net | 1x | 42.2 | 37.8 | [config](../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | -| R-101-FPN | + GRoIE | 1x | | | [config](./mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py)| [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507.log.json) | +| R-101-FPN | + GRoIE | 1x | 42.6 | 38.7 | [config](./mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py)| [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507.log.json) | ## Citation If you use this work or benchmark in your research, please cite this project. 
```latex -@misc{rossi2020novel, - title={A novel Region of Interest Extraction Layer for Instance Segmentation}, - author={Leonardo Rossi and Akbar Karimi and Andrea Prati}, - year={2020}, - eprint={2004.13665}, - archivePrefix={arXiv}, - primaryClass={cs.CV} +@inproceedings{rossi2021novel, + title={A novel region of interest extraction layer for instance segmentation}, + author={Rossi, Leonardo and Karimi, Akbar and Prati, Andrea}, + booktitle={2020 25th International Conference on Pattern Recognition (ICPR)}, + pages={2203--2209}, + year={2021}, + organization={IEEE} } ``` ## Contact -The implementation of GROI is currently maintained by +The implementation of GRoIE is currently maintained by [Leonardo Rossi](https://github.com/hachreak/). diff --git a/configs/guided_anchoring/README.md b/configs/guided_anchoring/README.md index 34d6b0d..d99ced9 100644 --- a/configs/guided_anchoring/README.md +++ b/configs/guided_anchoring/README.md @@ -1,6 +1,20 @@ # Region Proposal by Guided Anchoring -## Introduction +## Abstract + + + +Region anchors are the cornerstone of modern object detection techniques. State-of-the-art detectors mostly rely on a dense anchoring scheme, where anchors are sampled uniformly over the spatial domain with a predefined set of scales and aspect ratios. In this paper, we revisit this foundational stage. Our study shows that it can be done much more effectively and efficiently. Specifically, we present an alternative scheme, named Guided Anchoring, which leverages semantic features to guide the anchoring. The proposed method jointly predicts the locations where the center of objects of interest are likely to exist as well as the scales and aspect ratios at different locations. On top of predicted anchor shapes, we mitigate the feature inconsistency with a feature adaption module. We also study the use of high-quality proposals to improve detection performance. The anchoring scheme can be seamlessly integrated into proposal methods and detectors. With Guided Anchoring, we achieve 9.1% higher recall on MS COCO with 90% fewer anchors than the RPN baseline. We also adopt Guided Anchoring in Fast R-CNN, Faster R-CNN and RetinaNet, respectively improving the detection mAP by 2.2%, 2.7% and 1.2%. + + +
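A toy sketch of the two predictions Guided Anchoring adds on top of each feature map: a location branch (probability that an object centre lies at each position) and a shape branch (anchor width/height at that position). The feature adaption module, the actual training targets, and the exact decoding constants are omitted; all sizes below are illustrative.

```python
# Toy Guided Anchoring head: per-position anchor location probability and anchor shape.
import torch
import torch.nn as nn

class GuidedAnchorHeadSketch(nn.Module):
    def __init__(self, in_channels=256, base_anchor=8.0, stride=16):
        super().__init__()
        self.conv_loc = nn.Conv2d(in_channels, 1, 1)    # "an object centre is likely here"
        self.conv_shape = nn.Conv2d(in_channels, 2, 1)  # (dw, dh) offsets in log-space
        self.base = base_anchor * stride

    def forward(self, feat):
        loc_prob = self.conv_loc(feat).sigmoid()   # (N, 1, H, W) anchor-location probability
        dwdh = self.conv_shape(feat)               # (N, 2, H, W)
        anchor_wh = self.base * dwdh.exp()         # predicted anchor width/height per position
        return loc_prob, anchor_wh

feat = torch.randn(2, 256, 25, 25)
loc, wh = GuidedAnchorHeadSketch()(feat)
print(loc.shape, wh.shape)  # torch.Size([2, 1, 25, 25]) torch.Size([2, 2, 25, 25])
```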
+ + + + +## Citation diff --git a/configs/hrnet/README.md b/configs/hrnet/README.md index bff5ce0..faeb673 100644 --- a/configs/hrnet/README.md +++ b/configs/hrnet/README.md @@ -1,6 +1,23 @@ # High-resolution networks (HRNets) for object detection -## Introduction +## Abstract + + + +This is an official pytorch implementation of Deep High-Resolution Representation Learning for Human Pose Estimation. In this work, we are interested in the human pose estimation problem with a focus on learning reliable high-resolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the mutli-resolution subnetworks in parallel. We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich high-resolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection dataset and the MPII Human Pose dataset. + +High-resolution representation learning plays an essential role in many vision problems, e.g., pose estimation and semantic segmentation. The high-resolution network (HRNet), recently developed for human pose estimation, maintains high-resolution representations through the whole process by connecting high-to-low resolution convolutions in parallel and produces strong high-resolution representations by repeatedly conducting fusions across parallel convolutions. +In this paper, we conduct a further study on high-resolution representations by introducing a simple yet effective modification and apply it to a wide range of vision tasks. We augment the high-resolution representation by aggregating the (upsampled) representations from all the parallel convolutions rather than only the representation from the high-resolution convolution as done in HRNet. This simple modification leads to stronger representations, evidenced by superior results. We show top results in semantic segmentation on Cityscapes, LIP, and PASCAL Context, and facial landmark detection on AFLW, COFW, 300W, and WFLW. In addition, we build a multi-level representation from the high-resolution representation and apply it to the Faster R-CNN object detection framework and the extended frameworks. The proposed approach achieves superior results to existing single-model networks on COCO object detection. + + +
+ + + + +## Citation diff --git a/configs/htc/README.md b/configs/htc/README.md index 1625827..d722a80 100644 --- a/configs/htc/README.md +++ b/configs/htc/README.md @@ -1,6 +1,20 @@ # Hybrid Task Cascade for Instance Segmentation -## Introduction +## Abstract + + + +Cascade is a classic yet powerful architecture that has boosted performance on various tasks. However, how to introduce cascade to instance segmentation remains an open question. A simple combination of Cascade R-CNN and Mask R-CNN only brings limited gain. In exploring a more effective approach, we find that the key to a successful instance segmentation cascade is to fully leverage the reciprocal relationship between detection and segmentation. In this work, we propose a new framework, Hybrid Task Cascade (HTC), which differs in two important aspects: (1) instead of performing cascaded refinement on these two tasks separately, it interweaves them for a joint multi-stage processing; (2) it adopts a fully convolutional branch to provide spatial context, which can help distinguishing hard foreground from cluttered background. Overall, this framework can learn more discriminative features progressively while integrating complementary features together in each stage. Without bells and whistles, a single HTC obtains 38.4 and 1.5 improvement over a strong Cascade Mask R-CNN baseline on MSCOCO dataset. Moreover, our overall system achieves 48.6 mask AP on the test-challenge split, ranking 1st in the COCO 2018 Challenge Object Detection Task. + + +
+ + + + +## Citation diff --git a/configs/instaboost/README.md b/configs/instaboost/README.md index 02e9676..e948ccb 100644 --- a/configs/instaboost/README.md +++ b/configs/instaboost/README.md @@ -1,9 +1,28 @@ -# InstaBoost for MMDetection +# Instaboost: Boosting instance segmentation via probability map guided copy-pasting + +## Abstract + + + +Instance segmentation requires a large number of training samples to achieve satisfactory performance and benefits from proper data augmentation. To enlarge the training set and increase the diversity, previous methods have investigated using data annotation from other domain (e.g. bbox, point) in a weakly supervised mechanism. In this paper, we present a simple, efficient and effective method to augment the training set using the existing instance mask annotations. Exploiting the pixel redundancy of the background, we are able to improve the performance of Mask R-CNN for 1.7 mAP on COCO dataset and 3.3 mAP on Pascal VOC dataset by simply introducing random jittering to objects. Furthermore, we propose a location probability map based approach to explore the feasible locations that objects can be placed based on local appearance similarity. With the guidance of such map, we boost the performance of R101-Mask R-CNN on instance segmentation from 35.7 mAP to 37.9 mAP without modifying the backbone or network structure. Our method is simple to implement and does not increase the computational complexity. It can be integrated into the training pipeline of any instance segmentation model without affecting the training and inference efficiency. + + +
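In MMDetection, InstaBoost is applied as a data-pipeline transform, so enabling it is mostly a config change. Below is a hedged sketch of how the train pipeline in these configs wires it in; the parameter values are written from memory of the reference configs and should be checked against `configs/instaboost/` before use, and the `instaboostfast` package must be installed separately.

```python
# Sketch of a train pipeline with InstaBoost inserted before annotation loading,
# in the style of the configs under configs/instaboost/ (values are indicative only).
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),  # jitter / flip-paste / leave untouched
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),       # random rescaling of the pasted instance
        dx=15, dy=15,           # max translation in pixels
        theta=(-1, 1),          # rotation range in degrees
        color_prob=0.5,
        hflag=False,            # heatmap (location probability map) guidance flag in instaboostfast
        aug_ratio=0.5),         # fraction of images the augmentation is applied to
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize',
         mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
```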
+ + + + +## Introduction Configs in this directory is the implementation for ICCV2019 paper "InstaBoost: Boosting Instance Segmentation Via Probability Map Guided Copy-Pasting" and provided by the authors of the paper. InstaBoost is a data augmentation method for object detection and instance segmentation. The paper has been released on [`arXiv`](https://arxiv.org/abs/1908.07801). +## Citation + ```latex @inproceedings{fang2019instaboost, title={Instaboost: Boosting instance segmentation via probability map guided copy-pasting}, @@ -32,7 +51,7 @@ InstaBoost have been already integrated in the data pipeline, thus all you need ## Results and Models -- All models were trained on `coco_2017_train` and tested on `coco_2017_val` for conveinience of evaluation and comparison. In the paper, the results are obtained from `test-dev`. +- All models were trained on `coco_2017_train` and tested on `coco_2017_val` for convenience of evaluation and comparison. In the paper, the results are obtained from `test-dev`. - To balance accuracy and training time when using InstaBoost, models released in this page are all trained for 48 Epochs. Other training and testing configs strictly follow the original framework. - For results and models in MMDetection V1.x, please refer to [Instaboost](https://github.com/GothicAi/Instaboost). diff --git a/configs/lad/README.md b/configs/lad/README.md new file mode 100644 index 0000000..b48b063 --- /dev/null +++ b/configs/lad/README.md @@ -0,0 +1,50 @@ +# Improving Object Detection by Label Assignment Distillation + + +## Abstract + + + +Label assignment in object detection aims to assign targets, foreground or background, to sampled regions in an image. Unlike labeling for image classification, this problem is not well defined due to the object's bounding box. In this paper, we investigate the problem from a perspective of distillation, hence we call Label Assignment Distillation (LAD). Our initial motivation is very simple, we use a teacher network to generate labels for the student. This can be achieved in two ways: either using the teacher's prediction as the direct targets (soft label), or through the hard labels dynamically assigned by the teacher (LAD). Our experiments reveal that: (i) LAD is more effective than soft-label, but they are complementary. (ii) Using LAD, a smaller teacher can also improve a larger student significantly, while soft-label can't. We then introduce Co-learning LAD, in which two networks simultaneously learn from scratch and the role of teacher and student are dynamically interchanged. Using PAA-ResNet50 as a teacher, our LAD techniques can improve detectors PAA-ResNet101 and PAA-ResNeXt101 to 46AP and 47.5AP on the COCO test-dev set. With a stronger teacher PAA-SwinB, we improve the students PAA-ResNet50 to 43.7AP by only 1x schedule training and standard setting, and PAA-ResNet101 to 47.9AP, significantly surpassing the current methods. + + +
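The two LAD configs added further down pair a PAA student with a PAA teacher (see the `## Note` on the naming scheme). Because every file in this patch composes settings through `_base_` inheritance, a further variant would only need to spell out its overrides. The file below is purely hypothetical, not part of this patch, and its values are illustrative only:

```python
# Hypothetical LAD variant (NOT a released config): inherit everything from the
# r50-student / r101-teacher file added in this patch and override a few fields.
_base_ = './lad_r50_paa_r101_fpn_coco_1x.py'

data = dict(samples_per_gpu=4, workers_per_gpu=4)  # base file uses 8 / 4
optimizer = dict(lr=0.005)  # scaled down with the batch size; illustrative only
# Nested dicts are merged key-by-key. To replace a dict wholesale instead of
# merging, configs add _delete_=True (see the AdamW optimizer in the PVT
# configs further down in this patch).
```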
+ + + + +## Citation + + + + +```latex +@inproceedings{nguyen2021improving, + title={Improving Object Detection by Label Assignment Distillation}, + author={Chuong H. Nguyen and Thuy C. Nguyen and Tuan N. Tang and Nam L. H. Phan}, + booktitle = {WACV}, + year={2022} +} +``` + +## Results and Models + +We provide config files to reproduce the object detection results in the +WACV 2022 paper for Improving Object Detection by Label Assignment +Distillation. + +### PAA with LAD + +| Teacher | Student | Training schedule | AP (val) | Config | +| :-------: | :-----: | :---------------: | :------: | :----------------------------------------------------: | +| -- | R-50 | 1x | 40.4 | | +| -- | R-101 | 1x | 42.6 | | +| R-101 | R-50 | 1x | 41.6 | [config](configs/lad/lad_r50_paa_r101_fpn_coco_1x.py) | +| R-50 | R-101 | 1x | 43.2 | [config](configs/lad/lad_r101_paa_r50_fpn_coco_1x.py) | + +## Note + +- Meaning of Config name: lad_r50(student model)_paa(based on paa)_r101(teacher model)_fpn(neck)_coco(dataset)_1x(12 epoch).py +- Results may fluctuate by about 0.2 mAP. diff --git a/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py b/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py new file mode 100644 index 0000000..c7e3fcf --- /dev/null +++ b/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py @@ -0,0 +1,121 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa +model = dict( + type='LAD', + # student + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # teacher + teacher_ckpt=teacher_ckpt, + teacher_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + teacher_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + teacher_bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + 
loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + score_voting=True, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +data = dict(samples_per_gpu=8, workers_per_gpu=4) +optimizer = dict(lr=0.01) +fp16 = dict(loss_scale=512.) diff --git a/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py b/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py new file mode 100644 index 0000000..3c46f7d --- /dev/null +++ b/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py @@ -0,0 +1,120 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa +model = dict( + type='LAD', + # student + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # teacher + teacher_ckpt=teacher_ckpt, + teacher_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + teacher_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + teacher_bbox_head=dict( + type='LADHead', + reg_decoded_bbox=True, + score_voting=True, + topk=9, + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.3), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + 
pos_iou_thr=0.1, + neg_iou_thr=0.1, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + score_voting=True, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +data = dict(samples_per_gpu=8, workers_per_gpu=4) +optimizer = dict(lr=0.01) +fp16 = dict(loss_scale=512.) diff --git a/configs/ld/README.md b/configs/ld/README.md index 0177f1e..baa8247 100644 --- a/configs/ld/README.md +++ b/configs/ld/README.md @@ -1,6 +1,20 @@ # Localization Distillation for Object Detection -## Introduction +## Abstract + + + +Knowledge distillation (KD) has witnessed its powerful capability in learning compact models in object detection. Previous KD methods for object detection mostly focus on imitating deep features within the imitation regions instead of mimicking classification logits due to its inefficiency in distilling localization information. In this paper, by reformulating the knowledge distillation process on localization, we present a novel localization distillation (LD) method which can efficiently transfer the localization knowledge from the teacher to the student. Moreover, we also heuristically introduce the concept of valuable localization region that can aid to selectively distill the semantic and localization knowledge for a certain region. Combining these two new components, for the first time, we show that logit mimicking can outperform feature imitation and localization knowledge distillation is more important and efficient than semantic knowledge for distilling object detectors. Our distillation scheme is simple as well as effective and can be easily applied to different dense object detectors. Experiments show that our LD can boost the AP score of GFocal-ResNet-50 with a single-scale 1× training schedule from 40.1 to 42.1 on the COCO benchmark without any sacrifice on the inference speed. + + +
+ + + + +## Citation diff --git a/configs/libra_rcnn/README.md b/configs/libra_rcnn/README.md index 8b78af4..dac1049 100644 --- a/configs/libra_rcnn/README.md +++ b/configs/libra_rcnn/README.md @@ -1,6 +1,21 @@ # Libra R-CNN: Towards Balanced Learning for Object Detection -## Introduction +## Abstract + + + +Compared with model architectures, the training process, which is also crucial to the success of detectors, has received relatively less attention in object detection. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple but effective framework towards balanced learning for object detection. It integrates three novel components: IoU-balanced sampling, balanced feature pyramid, and balanced L1 loss, respectively for reducing the imbalance at sample, feature, and objective level. Benefitted from the overall balanced design, Libra R-CNN significantly improves the detection performance. Without bells and whistles, it achieves 2.5 points and 2.0 points higher Average Precision (AP) than FPN Faster R-CNN and RetinaNet respectively on MSCOCO. + +Instance recognition is rapidly advanced along with the developments of various deep convolutional neural networks. Compared to the architectures of networks, the training process, which is also crucial to the success of detectors, has received relatively less attention. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple yet effective framework towards balanced learning for instance recognition. It integrates IoU-balanced sampling, balanced feature pyramid, and objective re-weighting, respectively for reducing the imbalance at sample, feature, and objective level. Extensive experiments conducted on MS COCO, LVIS and Pascal VOC datasets prove the effectiveness of the overall balanced design. + +
+ + + + +## Citation diff --git a/configs/lvis/README.md b/configs/lvis/README.md index 157e872..daf27e8 100644 --- a/configs/lvis/README.md +++ b/configs/lvis/README.md @@ -1,6 +1,20 @@ -# LVIS dataset +# LVIS: A Dataset for Large Vocabulary Instance Segmentation -## Introduction +## Abstract + + + +Progress on object detection is enabled by datasets that focus the research community's attention on open challenges. This process led us from simple images to complex scenes and from bounding boxes to segmentation masks. In this work, we introduce LVIS (pronounced `el-vis'): a new dataset for Large Vocabulary Instance Segmentation. We plan to collect ~2 million high-quality instance segmentation masks for over 1000 entry-level object categories in 164k images. Due to the Zipfian distribution of categories in natural images, LVIS naturally has a long tail of categories with few training samples. Given that state-of-the-art deep learning methods for object detection perform poorly in the low-sample regime, we believe that our dataset poses an important and exciting new scientific challenge. + + +
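The `Common Setting` section below notes that all LVIS experiments use class-balanced oversampling with threshold `1e-3`. A sketch of how that wrapper looks in a config, following the pattern of the released LVIS configs (dataset paths are placeholders and the training pipeline is omitted for brevity):

```python
# Sketch only: wrap the LVIS v0.5 train split in ClassBalancedDataset so that
# rare categories are oversampled up to the 1e-3 threshold mentioned below.
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'  # placeholder path
data = dict(
    train=dict(
        _delete_=True,  # replace the base config's train dict entirely
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v0.5_train.json',
            img_prefix=data_root + 'train2017/',
            # pipeline=train_pipeline,  # omitted here for brevity
        )))
```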
+ + + + +## Citation @@ -15,14 +29,14 @@ ## Common Setting -* Please follow [install guide](../../docs/install.md#install-mmdetection) to install open-mmlab forked cocoapi first. +* Please follow [install guide](../../docs/get_started.md#install-mmdetection) to install open-mmlab forked cocoapi first. * Run following scripts to install our forked lvis-api. ```shell pip install git+https://github.com/lvis-dataset/lvis-api.git ``` -* All experiments use oversample strategy [here](../../docs/tutorials/new_dataset.md#class-balanced-dataset) with oversample threshold `1e-3`. +* All experiments use oversample strategy [here](../../docs/tutorials/customize_dataset.md#class-balanced-dataset) with oversample threshold `1e-3`. * The size of LVIS v0.5 is half of COCO, so schedule `2x` in LVIS is roughly the same iterations as `1x` in COCO. ## Results and models of LVIS v0.5 diff --git a/configs/mask_rcnn/README.md b/configs/mask_rcnn/README.md index 77cfabe..5080a04 100644 --- a/configs/mask_rcnn/README.md +++ b/configs/mask_rcnn/README.md @@ -1,6 +1,20 @@ # Mask R-CNN -## Introduction +## Abstract + + + +We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. + + +
+ + + + +## Citation @@ -21,6 +35,7 @@ | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: | | R-50-FPN | caffe | 1x | 4.3 | | 38.0 | 34.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_20200504_231812.log.json) | | R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) | | R-50-FPN | pytorch | 2x | - | - | 39.2 | 35.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_20200505_003907.log.json) | | R-101-FPN | caffe | 1x | | | 40.4 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758.log.json)| | R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) | diff --git a/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py b/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py new file mode 100644 index 0000000..fb8289b --- /dev/null +++ b/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py @@ -0,0 +1,3 @@ +_base_ = './mask_rcnn_r50_fpn_1x_coco.py' +# fp16 settings +fp16 = dict(loss_scale=512.) 
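The three-line `mask_rcnn_r50_fpn_fp16_1x_coco.py` above works because `_base_` pulls in the full FP32 config and the `fp16` dict only switches on mixed precision. Elsewhere in this patch the same setting is written in dict form, and mmcv's FP16 hook can also be asked to scale the loss dynamically; the dynamic spelling below is my assumption about the supported syntax, not something this patch adds:

```python
# FP16 settings: alternative spellings of the loss-scale option.
fp16 = dict(loss_scale=512.)  # fixed scale, as in the new Mask R-CNN and LAD configs
# fp16 = dict(loss_scale=dict(init_scale=512))  # dict form, as in retinanet_pvt-l_fpn_1x_coco.py
# fp16 = dict(loss_scale='dynamic')  # dynamic loss scaling (assumed to be supported by the targeted mmcv)
```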
diff --git a/configs/mask_rcnn/metafile.yml b/configs/mask_rcnn/metafile.yml index 747a99a..f74bdf3 100644 --- a/configs/mask_rcnn/metafile.yml +++ b/configs/mask_rcnn/metafile.yml @@ -16,7 +16,7 @@ Collections: - RoIAlign Paper: URL: https://arxiv.org/abs/1703.06870v3 - Title: 'Mask R-CNN' + Title: "Mask R-CNN" README: configs/mask_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_rcnn.py#L6 @@ -64,6 +64,34 @@ Models: mask AP: 34.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth + - Name: mask_rcnn_r50_fpn_fp16_1x_coco + In Collection: Mask R-CNN + Config: configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py + Metadata: + Training Memory (GB): 3.6 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 41.49 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 38.1 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 34.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth + - Name: mask_rcnn_r50_fpn_2x_coco In Collection: Mask R-CNN Config: configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py diff --git a/configs/ms_rcnn/README.md b/configs/ms_rcnn/README.md index f36b64c..5a80c0d 100644 --- a/configs/ms_rcnn/README.md +++ b/configs/ms_rcnn/README.md @@ -1,6 +1,20 @@ # Mask Scoring R-CNN -## Introduction +## Abstract + + + +Letting a deep network be aware of the quality of its own predictions is an interesting yet important problem. In the task of instance segmentation, the confidence of instance classification is used as mask quality score in most instance segmentation frameworks. However, the mask quality, quantified as the IoU between the instance mask and its ground truth, is usually not well correlated with classification score. In this paper, we study this problem and propose Mask Scoring R-CNN which contains a network block to learn the quality of the predicted instance masks. The proposed network block takes the instance feature and the corresponding predicted mask together to regress the mask IoU. The mask scoring strategy calibrates the misalignment between mask quality and mask score, and improves instance segmentation performance by prioritizing more accurate mask predictions during COCO AP evaluation. By extensive evaluations on the COCO dataset, Mask Scoring R-CNN brings consistent and noticeable gain with different models, and outperforms the state-of-the-art Mask R-CNN. We hope our simple and effective approach will provide a new direction for improving instance segmentation. + + +
+ + + + +## Citation diff --git a/configs/nas_fcos/README.md b/configs/nas_fcos/README.md index 21a2b22..2e3b56e 100644 --- a/configs/nas_fcos/README.md +++ b/configs/nas_fcos/README.md @@ -1,6 +1,20 @@ # NAS-FCOS: Fast Neural Architecture Search for Object Detection -## Introduction +## Abstract + + + +The success of deep neural networks relies on significant architecture engineering. Recently neural architecture search (NAS) has emerged as a promise to greatly reduce manual effort in network design by automatically searching for optimal architectures, although typically such algorithms need an excessive amount of computational resources, e.g., a few thousand GPU-days. To date, on challenging vision tasks such as object detection, NAS, especially fast versions of NAS, is less studied. Here we propose to search for the decoder structure of object detectors with search efficiency being taken into consideration. To be more specific, we aim to efficiently search for the feature pyramid network (FPN) as well as the prediction head of a simple anchor-free object detector, namely FCOS, using a tailored reinforcement learning paradigm. With carefully designed search space, search algorithms and strategies for evaluating network quality, we are able to efficiently search a top-performing detection architecture within 4 days using 8 V100 GPUs. The discovered architecture surpasses state-of-the-art object detection models (such as Faster R-CNN, RetinaNet and FCOS) by 1.5 to 3.5 points in AP on the COCO dataset, with comparable computation complexity and memory footprint, demonstrating the efficacy of the proposed NAS for object detection. + + +
+ + + + +## Citation diff --git a/configs/nas_fpn/README.md b/configs/nas_fpn/README.md index 9862b9a..b2836d7 100644 --- a/configs/nas_fpn/README.md +++ b/configs/nas_fpn/README.md @@ -1,6 +1,20 @@ # NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection -## Introduction +## Abstract + + + +Current state-of-the-art convolutional architectures for object detection are manually designed. Here we aim to learn a better architecture of feature pyramid network for object detection. We adopt Neural Architecture Search and discover a new feature pyramid architecture in a novel scalable search space covering all cross-scale connections. The discovered architecture, named NAS-FPN, consists of a combination of top-down and bottom-up connections to fuse features across scales. NAS-FPN, combined with various backbone models in the RetinaNet framework, achieves better accuracy and latency tradeoff compared to state-of-the-art object detection models. NAS-FPN improves mobile detection accuracy by 2 AP compared to state-of-the-art SSDLite with MobileNetV2 model in [32] and achieves 48.3 AP which surpasses Mask R-CNN [10] detection accuracy with less computation time. + + +
+ + + + +## Citation diff --git a/configs/paa/README.md b/configs/paa/README.md index 3e9b6e3..05e65d2 100644 --- a/configs/paa/README.md +++ b/configs/paa/README.md @@ -1,5 +1,21 @@ # Probabilistic Anchor Assignment with IoU Prediction for Object Detection +## Abstract + + + +In object detection, determining which anchors to assign as positive or negative samples, known as anchor assignment, has been revealed as a core procedure that can significantly affect a model's performance. In this paper we propose a novel anchor assignment strategy that adaptively separates anchors into positive and negative samples for a ground truth bounding box according to the model's learning status such that it is able to reason about the separation in a probabilistic manner. To do so we first calculate the scores of anchors conditioned on the model and fit a probability distribution to these scores. The model is then trained with anchors separated into positive and negative samples according to their probabilities. Moreover, we investigate the gap between the training and testing objectives and propose to predict the Intersection-over-Unions of detected boxes as a measure of localization quality to reduce the discrepancy. The combined score of classification and localization qualities serving as a box selection metric in non-maximum suppression well aligns with the proposed anchor assignment strategy and leads significant performance improvements. The proposed methods only add a single convolutional layer to RetinaNet baseline and does not require multiple anchors per location, so are efficient. Experimental results verify the effectiveness of the proposed methods. Especially, our models set new records for single-stage detectors on MS COCO test-dev dataset with various backbones. + + +
+ + + + +## Citation + ```latex diff --git a/configs/pafpn/README.md b/configs/pafpn/README.md index 9602c6d..8516d34 100644 --- a/configs/pafpn/README.md +++ b/configs/pafpn/README.md @@ -1,6 +1,20 @@ # Path Aggregation Network for Instance Segmentation -## Introduction +## Abstract + + + +The way that information propagates in neural networks is of great importance. In this paper, we propose Path Aggregation Network (PANet) aiming at boosting information flow in proposal-based instance segmentation framework. Specifically, we enhance the entire feature hierarchy with accurate localization signals in lower layers by bottom-up path augmentation, which shortens the information path between lower layers and topmost feature. We present adaptive feature pooling, which links feature grid and all feature levels to make useful information in each feature level propagate directly to following proposal subnetworks. A complementary branch capturing different views for each proposal is created to further improve mask prediction. These improvements are simple to implement, with subtle extra computational overhead. Our PANet reaches the 1st place in the COCO 2017 Challenge Instance Segmentation task and the 2nd place in Object Detection task without large-batch training. It is also state-of-the-art on MVD and Cityscapes. + + +
+ + + + +## Citation diff --git a/configs/panoptic_fpn/README.md b/configs/panoptic_fpn/README.md index d209183..083fb6e 100644 --- a/configs/panoptic_fpn/README.md +++ b/configs/panoptic_fpn/README.md @@ -1,5 +1,19 @@ # Panoptic feature pyramid networks -## Introduction +## Abstract + + + +The recently introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation. + + +
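The `panoptic_fpn_r50_fpn_1x_coco.py` hunk below replaces the single `num_classes=54` with explicit thing/stuff counts and switches the ignore index to 255. An annotated excerpt for clarity; reading 255 as the ignore label written into the generated stuff segmentation maps is my interpretation, not stated in the patch:

```python
# Annotated excerpt of the updated semantic head (see the hunk below).
semantic_head = dict(
    type='PanopticFPNHead',
    num_things_classes=80,  # COCO instance ("thing") categories
    num_stuff_classes=53,   # COCO panoptic "stuff" categories; 80 + 53 = 133 total
    in_channels=256,
    inner_channels=128,
    loss_seg=dict(
        type='CrossEntropyLoss',
        ignore_index=255,  # pixels labelled 255 are excluded from the loss
        loss_weight=0.5))
```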
+ + + + +## Citation The base method for panoptic segmentation task. diff --git a/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py b/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py index 6f463c1..2995524 100644 --- a/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py +++ b/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py @@ -7,7 +7,8 @@ type='PanopticFPN', semantic_head=dict( type='PanopticFPNHead', - num_classes=54, + num_things_classes=80, + num_stuff_classes=53, in_channels=256, inner_channels=128, start_level=0, @@ -15,7 +16,7 @@ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), conv_cfg=None, loss_seg=dict( - type='CrossEntropyLoss', ignore_index=-1, loss_weight=0.5)), + type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)), panoptic_fusion_head=dict( type='HeuristicFusionHead', num_things_classes=80, diff --git a/configs/pascal_voc/README.md b/configs/pascal_voc/README.md index 69e0220..af33edb 100644 --- a/configs/pascal_voc/README.md +++ b/configs/pascal_voc/README.md @@ -1,4 +1,22 @@ -# PASCAL VOC Dataset +# The Pascal Visual Object Classes (VOC) Challenge + +## Abstract + + + +The Pascal Visual Object Classes (VOC) challenge is a benchmark in visual object category recognition and detection, providing the vision and machine learning communities with a standard dataset of images and annotation, and standard evaluation procedures. Organised annually from 2005 to present, the challenge and its associated dataset has become accepted as the benchmark for object detection. + +This paper describes the dataset and evaluation procedure. We review the state-of-the-art in evaluated methods for both classification and detection, analyse whether the methods are statistically different, what they are learning from the images (e.g. the object or its context), and what the methods find easy or confuse. The paper concludes with lessons learnt in the three year history of the challenge, and proposes directions for future improvement and extension. + + +
+ + + + +## Citation diff --git a/configs/pascal_voc/ssd300_voc0712.py b/configs/pascal_voc/ssd300_voc0712.py index 271ebe3..4d6bbd9 100644 --- a/configs/pascal_voc/ssd300_voc0712.py +++ b/configs/pascal_voc/ssd300_voc0712.py @@ -11,14 +11,8 @@ data_root = 'data/VOCdevkit/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -29,8 +23,14 @@ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] diff --git a/configs/pascal_voc/ssd512_voc0712.py b/configs/pascal_voc/ssd512_voc0712.py index ea2b69f..f4627c2 100644 --- a/configs/pascal_voc/ssd512_voc0712.py +++ b/configs/pascal_voc/ssd512_voc0712.py @@ -1,6 +1,11 @@ _base_ = 'ssd300_voc0712.py' input_size = 512 model = dict( + neck=dict( + out_channels=(512, 1024, 512, 256, 256, 256, 256), + level_strides=(2, 2, 2, 2, 1), + level_paddings=(1, 1, 1, 1, 1), + last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), anchor_generator=dict( @@ -10,14 +15,8 @@ ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2])))) img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -28,8 +27,14 @@ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(512, 512), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] diff --git a/configs/pisa/README.md b/configs/pisa/README.md index 3ae3392..d1ea3a4 100644 --- a/configs/pisa/README.md +++ b/configs/pisa/README.md @@ -1,6 +1,20 @@ # Prime Sample Attention in Object Detection -## Introduction +## Abstract + + + +It is a common paradigm in object detection frameworks to treat all samples equally and target at maximizing the performance on average. In this work, we revisit this paradigm through a careful study on how different samples contribute to the overall performance measured in terms of mAP. Our study suggests that the samples in each mini-batch are neither independent nor equally important, and therefore a better classifier on average does not necessarily mean higher mAP. 
Motivated by this study, we propose the notion of Prime Samples, those that play a key role in driving the detection performance. We further develop a simple yet effective sampling and learning strategy called PrIme Sample Attention (PISA) that directs the focus of the training process towards such samples. Our experiments demonstrate that it is often more effective to focus on prime samples than hard samples when training a detector. Particularly, On the MSCOCO dataset, PISA outperforms the random sampling baseline and hard mining schemes, e.g., OHEM and Focal Loss, consistently by around 2% on both single-stage and two-stage detectors, even with a strong backbone ResNeXt-101. + + +
+ + + + +## Citation diff --git a/configs/point_rend/README.md b/configs/point_rend/README.md index fafb29c..4a9b6f2 100644 --- a/configs/point_rend/README.md +++ b/configs/point_rend/README.md @@ -1,6 +1,20 @@ -# PointRend +# PointRend: Image Segmentation as Rendering -## Introduction +## Abstract + + + +We present a new method for efficient high-quality image segmentation of objects and scenes. By analogizing classical computer graphics methods for efficient rendering with over- and undersampling challenges faced in pixel labeling tasks, we develop a unique perspective of image segmentation as a rendering problem. From this vantage, we present the PointRend (Point-based Rendering) neural network module: a module that performs point-based segmentation predictions at adaptively selected locations based on an iterative subdivision algorithm. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. While many concrete implementations of the general idea are possible, we show that a simple design already achieves excellent results. Qualitatively, PointRend outputs crisp object boundaries in regions that are over-smoothed by previous methods. Quantitatively, PointRend yields significant gains on COCO and Cityscapes, for both instance and semantic segmentation. PointRend's efficiency enables output resolutions that are otherwise impractical in terms of memory or computation compared to existing approaches. + + +
+ + + + +## Citation diff --git a/configs/pvt/README.md b/configs/pvt/README.md new file mode 100644 index 0000000..368edb8 --- /dev/null +++ b/configs/pvt/README.md @@ -0,0 +1,60 @@ +# Pyramid vision transformer: A versatile backbone for dense prediction without convolutions + +## Abstract + + + +Although using convolutional neural networks (CNNs) as backbones achieves great successes in computer vision, this work investigates a simple backbone network useful for many dense prediction tasks without convolutions. Unlike the recently-proposed Transformer model (e.g., ViT) that is specially designed for image classification, we propose Pyramid Vision Transformer~(PVT), which overcomes the difficulties of porting Transformer to various dense prediction tasks. PVT has several merits compared to prior arts. (1) Different from ViT that typically has low-resolution outputs and high computational and memory cost, PVT can be not only trained on dense partitions of the image to achieve high output resolution, which is important for dense predictions but also using a progressive shrinking pyramid to reduce computations of large feature maps. (2) PVT inherits the advantages from both CNN and Transformer, making it a unified backbone in various vision tasks without convolutions by simply replacing CNN backbones. (3) We validate PVT by conducting extensive experiments, showing that it boosts the performance of many downstream tasks, e.g., object detection, semantic, and instance segmentation. For example, with a comparable number of parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing RetinNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT could serve as an alternative and useful backbone for pixel-level predictions and facilitate future researches. + +Transformer recently has shown encouraging progresses in computer vision. In this work, we present new baselines by improving the original Pyramid Vision Transformer (abbreviated as PVTv1) by adding three designs, including (1) overlapping patch embedding, (2) convolutional feed-forward networks, and (3) linear complexity attention layers. +With these modifications, our PVTv2 significantly improves PVTv1 on three tasks e.g., classification, detection, and segmentation. Moreover, PVTv2 achieves comparable or better performances than recent works such as Swin Transformer. We hope this work will facilitate state-of-the-art Transformer researches in computer vision. + + +
+ + + + +## Citation + + + +```latex +@article{wang2021pyramid, + title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={arXiv preprint arXiv:2102.12122}, + year={2021} +} +``` + +```latex +@article{wang2021pvtv2, + title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, + author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, + journal={arXiv preprint arXiv:2106.13797}, + year={2021} +} +``` +## Results and Models + +### RetinaNet (PVTv1) + +| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | +|:-----------:|:-------:|:--------:|:------:|:------:|:--------:| +| PVT-Tiny | 12e |8.5 |36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_t_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110.log.json) | +| PVT-Small | 12e |14.5 |40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_s_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921.log.json) | +| PVT-Medium | 12e |20.9 |41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_m_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243.log.json) | + +### RetinaNet (PVTv2) + +| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | +|:-----------:|:-------:|:--------:|:------:|:------:|:--------:| +| PVTv2-B0 | 12e |7.4 |37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b0_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157.log.json) | +| PVTv2-B1 | 12e |9.5 |41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b1_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318.log.json) | +| PVTv2-B2 | 12e |16.2 |44.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b2_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth) | 
[log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843.log.json) | +| PVTv2-B3 | 12e |23.0 |46.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b3_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512.log.json) | +| PVTv2-B4 | 12e |17.0 |46.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b4_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151.log.json) | +| PVTv2-B5 | 12e |18.7 |46.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800.log.json) | diff --git a/configs/pvt/metafile.yml b/configs/pvt/metafile.yml new file mode 100644 index 0000000..48a0e2c --- /dev/null +++ b/configs/pvt/metafile.yml @@ -0,0 +1,136 @@ +Collections: + - Name: PVT + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - RetinaNet + - PyramidVisionTransformer + - FPN + Paper: https://arxiv.org/abs/2102.12122 + README: configs/pvt/README.md + - Name: PVT-v2 + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x NVIDIA V100 GPUs + Architecture: + - RetinaNet + - PyramidVisionTransformerV2 + - FPN + Paper: https://arxiv.org/abs/2106.13797 + README: configs/pvt/README.md +Models: + - Name: retinanet_pvt-t_fpn_1x_coco + In Collection: PVT + Config: configs/pvt/retinanet_pvt-t_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth + - Name: retinanet_pvt-s_fpn_1x_coco + In Collection: PVT + Config: configs/pvt/retinanet_pvt-s_fpn_1x_coco.py + Metadata: + Training Memory (GB): 14.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth + - Name: retinanet_pvt-m_fpn_1x_coco + In Collection: PVT + Config: configs/pvt/retinanet_pvt-m_fpn_1x_coco.py + Metadata: + Training Memory (GB): 20.9 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth + - Name: retinanet_pvtv2-b0_fpn_1x_coco + In Collection: PVT-v2 + Config: 
configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 37.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth + - Name: retinanet_pvtv2-b1_fpn_1x_coco + In Collection: PVT-v2 + Config: configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py + Metadata: + Training Memory (GB): 9.5 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 41.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth + - Name: retinanet_pvtv2-b2_fpn_1x_coco + In Collection: PVT-v2 + Config: configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py + Metadata: + Training Memory (GB): 16.2 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth + - Name: retinanet_pvtv2-b3_fpn_1x_coco + In Collection: PVT-v2 + Config: configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py + Metadata: + Training Memory (GB): 23.0 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth + - Name: retinanet_pvtv2-b4_fpn_1x_coco + In Collection: PVT-v2 + Config: configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py + Metadata: + Training Memory (GB): 17.0 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth + - Name: retinanet_pvtv2-b5_fpn_1x_coco + In Collection: PVT-v2 + Config: configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py + Metadata: + Training Memory (GB): 18.7 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth diff --git a/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py b/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py new file mode 100644 index 0000000..e299f2a --- /dev/null +++ b/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 8, 27, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_large.pth'))) +fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py b/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py new file mode 100644 index 0000000..b888f78 --- /dev/null +++ b/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' +model = dict( + backbone=dict( + num_layers=[3, 4, 18, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_medium.pth'))) diff --git a/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py b/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py new file mode 100644 index 0000000..4660348 --- /dev/null +++ b/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py @@ -0,0 +1,6 @@ +_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' 
+model = dict( + backbone=dict( + num_layers=[3, 4, 6, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_small.pth'))) diff --git a/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py b/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py new file mode 100644 index 0000000..a6cff7d --- /dev/null +++ b/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RetinaNet', + backbone=dict( + _delete_=True, + type='PyramidVisionTransformer', + num_layers=[2, 2, 2, 2], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_tiny.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git a/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py b/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py new file mode 100644 index 0000000..cbe2295 --- /dev/null +++ b/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='RetinaNet', + backbone=dict( + _delete_=True, + type='PyramidVisionTransformerV2', + embed_dims=32, + num_layers=[2, 2, 2, 2], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b0.pth')), + neck=dict(in_channels=[32, 64, 160, 256])) +# optimizer +optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git a/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py b/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py new file mode 100644 index 0000000..5374c50 --- /dev/null +++ b/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py @@ -0,0 +1,7 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b1.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py b/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py new file mode 100644 index 0000000..cf9a18d --- /dev/null +++ b/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 4, 6, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b2.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py b/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py new file mode 100644 index 0000000..7a47f82 --- /dev/null +++ b/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py @@ -0,0 +1,8 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 4, 18, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b3.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py b/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py new file mode 100644 index 0000000..ec9103b --- /dev/null +++ b/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py @@ -0,0 +1,13 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + 
num_layers=[3, 8, 27, 3], + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b4.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) +# dataset settings +data = dict(samples_per_gpu=1, workers_per_gpu=1) diff --git a/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py b/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py new file mode 100644 index 0000000..d8e6d23 --- /dev/null +++ b/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py @@ -0,0 +1,14 @@ +_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' +model = dict( + backbone=dict( + embed_dims=64, + num_layers=[3, 6, 40, 3], + mlp_ratios=(4, 4, 4, 4), + init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' + 'releases/download/v2/pvt_v2_b5.pth')), + neck=dict(in_channels=[64, 128, 320, 512])) +# optimizer +optimizer = dict( + _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) +# dataset settings +data = dict(samples_per_gpu=1, workers_per_gpu=1) diff --git a/configs/queryinst/README.md b/configs/queryinst/README.md new file mode 100644 index 0000000..7129dd4 --- /dev/null +++ b/configs/queryinst/README.md @@ -0,0 +1,40 @@ +# Instances as Queries + +## Abstract + + + +We present QueryInst, a new perspective for instance segmentation. QueryInst is a multi-stage end-to-end system that treats instances of interest as learnable queries, enabling query based object detectors, e.g., Sparse R-CNN, to have strong instance segmentation performance. The attributes of instances such as categories, bounding boxes, instance masks, and instance association embeddings are represented by queries in a unified manner. In QueryInst, a query is shared by both detection and segmentation via dynamic convolutions and driven by parallelly-supervised multi-stage learning. We conduct extensive experiments on three challenging benchmarks, i.e., COCO, CityScapes, and YouTube-VIS to evaluate the effectiveness of QueryInst in object detection, instance segmentation, and video instance segmentation tasks. For the first time, we demonstrate that a simple end-to-end query based framework can achieve the state-of-the-art performance in various instance-level recognition tasks. + + +
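A minimal usage sketch for the QueryInst checkpoints listed in the results table below, using mmdet's high-level inference API; the local paths are placeholders and the checkpoint file name is taken from the R-50-FPN 1x entry:

```python
# Load a released QueryInst checkpoint and run inference on one image.
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/queryinst/queryinst_r50_fpn_1x_coco.py'
checkpoint_file = 'queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth'  # downloaded weights

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # (bbox_results, segm_results) per class
model.show_result('demo/demo.jpg', result, out_file='queryinst_result.jpg')
```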
+ +
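For readers who want to try these checkpoints directly, here is a minimal inference sketch using the MMDetection 2.x high-level API. The config and checkpoint paths are placeholders taken from the table in this README and are assumed to have been downloaded locally; the demo image path is likewise a placeholder.

```python
# Minimal QueryInst inference sketch (assumes mmdet/mmcv are installed and the
# files below exist locally; paths are placeholders, not guaranteed locations).
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/queryinst/queryinst_r50_fpn_1x_coco.py'
checkpoint_file = 'checkpoints/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # (bbox results, mask results)
model.show_result('demo/demo.jpg', result, out_file='queryinst_demo_out.jpg')
```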
+ + + + +## Citation + + + +``` +@InProceedings{Fang_2021_ICCV, + author = {Fang, Yuxin and Yang, Shusheng and Wang, Xinggang and Li, Yu and Fang, Chen and Shan, Ying and Feng, Bin and Liu, Wenyu}, + title = {Instances As Queries}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2021}, + pages = {6910-6919} +} +``` + +## Results and Models + +| Model | Backbone | Style | Lr schd | Number of Proposals |Multi-Scale| RandomCrop | box AP | mask AP | Config | Download | +|:------------:|:---------:|:-------:|:-------:|:-------: |:-------: |:---------:|:------:|:------:|:------:|:--------:| +| QueryInst | R-50-FPN | pytorch | 1x | 100 | False | False | 42.0 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916.log.json) | +| QueryInst | R-50-FPN | pytorch | 3x | 100 | True | False | 44.8 | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643.log.json) | +| QueryInst | R-50-FPN | pytorch | 3x | 300 | True | True | 47.5 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802.log.json) | +| QueryInst | R-101-FPN | pytorch | 3x | 100 | True | False | 46.4 | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048.log.json) | +| QueryInst | R-101-FPN | pytorch | 3x | 300 | True | True | 49.0 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth) | 
[log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621.log.json) | diff --git a/configs/queryinst/metafile.yml b/configs/queryinst/metafile.yml new file mode 100644 index 0000000..da7f0a7 --- /dev/null +++ b/configs/queryinst/metafile.yml @@ -0,0 +1,100 @@ +Collections: + - Name: QueryInst + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - ResNet + - QueryInst + Paper: + URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf + Title: 'Instances as Queries' + README: configs/queryinst/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/queryinst.py + Version: v2.18.0 + +Models: + - Name: queryinst_r50_fpn_1x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_1x_coco.py + Metadata: + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 37.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth + + - Name: queryinst_r50_fpn_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.8 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth + + - Name: queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.5 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth + + - Name: queryinst_r101_fpn_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.4 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth + + - Name: queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco + In Collection: QueryInst + Config: configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py + Metadata: + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 42.9 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth diff --git a/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..fd138f5 --- /dev/null +++ b/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py b/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..07cae19 --- /dev/null +++ b/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py @@ -0,0 +1,7 @@ +_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/configs/queryinst/queryinst_r50_fpn_1x_coco.py b/configs/queryinst/queryinst_r50_fpn_1x_coco.py new file mode 100644 index 0000000..48f5773 --- /dev/null +++ b/configs/queryinst/queryinst_r50_fpn_1x_coco.py @@ -0,0 +1,138 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +num_stages = 6 +num_proposals = 100 +model = dict( + type='QueryInst', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + add_extra_convs='on_input', + num_outs=4), + rpn_head=dict( + type='EmbeddingRPNHead', + num_proposals=num_proposals, + proposal_feature_channel=256), + roi_head=dict( + type='SparseRoIHead', + num_stages=num_stages, + stage_loss_weights=[1] * num_stages, + proposal_feature_channel=256, + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='DIIHead', + num_classes=80, + num_ffn_fcs=2, + num_heads=8, + num_cls_fcs=1, + num_reg_fcs=3, + feedforward_channels=2048, + in_channels=256, + dropout=0.0, + ffn_act_cfg=dict(type='ReLU', inplace=True), + dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=7, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=False, + target_means=[0., 0., 0., 0.], + target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) + ], + mask_head=[ + dict( + type='DynamicMaskHead', + 
dynamic_conv_cfg=dict( + type='DynamicConv', + in_channels=256, + feat_channels=64, + out_channels=256, + input_feat_shape=14, + with_proj=False, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN')), + num_convs=4, + num_classes=80, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + conv_out_channels=256, + class_agnostic=False, + norm_cfg=dict(type='BN'), + upsample_cfg=dict(type='deconv', scale_factor=2), + loss_mask=dict( + type='DiceLoss', + loss_weight=8.0, + use_sigmoid=True, + activate=False, + eps=1e-5)) for _ in range(num_stages) + ]), + # training and testing settings + train_cfg=dict( + rpn=None, + rcnn=[ + dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0), + iou_cost=dict(type='IoUCost', iou_mode='giou', + weight=2.0)), + sampler=dict(type='PseudoSampler'), + pos_weight=1, + mask_size=28, + ) for _ in range(num_stages) + ]), + test_cfg=dict( + rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) + +# optimizer +optimizer = dict( + _delete_=True, + type='AdamW', + lr=0.0001, + weight_decay=0.0001, + paramwise_cfg=dict( + custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) +optimizer_config = dict( + _delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[8, 11], warmup_iters=1000) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..3089b3c --- /dev/null +++ b/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py @@ -0,0 +1,54 @@ +_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' +num_proposals = 300 +model = dict( + rpn_head=dict(num_proposals=num_proposals), + test_cfg=dict( + _delete_=True, + rpn=None, + rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +# augmentation strategy originates from DETR. 
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='AutoAugment', + policies=[[ + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), + (608, 1333), (640, 1333), (672, 1333), (704, 1333), + (736, 1333), (768, 1333), (800, 1333)], + multiscale_mode='value', + keep_ratio=True) + ], + [ + dict( + type='Resize', + img_scale=[(400, 1333), (500, 1333), (600, 1333)], + multiscale_mode='value', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=(384, 600), + allow_negative_crop=True), + dict( + type='Resize', + img_scale=[(480, 1333), (512, 1333), (544, 1333), + (576, 1333), (608, 1333), (640, 1333), + (672, 1333), (704, 1333), (736, 1333), + (768, 1333), (800, 1333)], + multiscale_mode='value', + override=True, + keep_ratio=True) + ]]), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py b/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py new file mode 100644 index 0000000..89e2cd1 --- /dev/null +++ b/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py @@ -0,0 +1,23 @@ +_base_ = './queryinst_r50_fpn_1x_coco.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, value) for value in min_values], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) +] + +data = dict(train=dict(pipeline=train_pipeline)) +lr_config = dict(policy='step', step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/configs/regnet/README.md b/configs/regnet/README.md index 79ee258..51f44df 100644 --- a/configs/regnet/README.md +++ b/configs/regnet/README.md @@ -1,13 +1,29 @@ # Designing Network Design Spaces +## Abstract + + + + In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. 
Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs. + + +
+ +
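The abstract's core claim is that the widths of a good network can be explained by a quantized linear function. As an illustration only (not code from this repo), the sketch below reproduces that parametrization from the description above; the example parameter values are made up for demonstration.

```python
import numpy as np

def regnet_widths(w_0, w_a, w_m, depth, q=8):
    """Quantized linear width parametrization (illustrative sketch).

    u_j = w_0 + w_a * j gives a linear width per block j; each width is then
    snapped to w_0 times a power of w_m and rounded to a multiple of q.
    """
    j = np.arange(depth)
    u = w_0 + w_a * j                            # continuous linear widths
    s = np.round(np.log(u / w_0) / np.log(w_m))  # per-block quantized exponent
    w = w_0 * np.power(w_m, s)                   # quantized widths
    return (np.round(w / q) * q).astype(int)     # snap to multiples of q

# Illustrative parameters in the spirit of a small RegNetX model:
print(regnet_widths(w_0=24, w_a=36.44, w_m=2.49, depth=13))
```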
+ + + + ## Introduction -[BACKBONE] + We implement RegNetX and RegNetY models in detection systems and provide their first results on Mask R-CNN, Faster R-CNN and RetinaNet. The pre-trained models are converted from [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). +## Citation + ```latex @article{radosavovic2020designing, title={Designing Network Design Spaces}, diff --git a/configs/reppoints/README.md index fc58915..8e53bd3 100644 --- a/configs/reppoints/README.md +++ b/configs/reppoints/README.md @@ -1,24 +1,33 @@ # RepPoints: Point Set Representation for Object Detection +## Abstract + + + +Modern object detectors rely heavily on rectangular bounding boxes, such as anchors, proposals and the final predictions, to represent objects at various recognition stages. The bounding box is convenient to use but provides only a coarse localization of objects and leads to a correspondingly coarse extraction of object features. In this paper, we present RepPoints (representative points), a new finer representation of objects as a set of sample points useful for both localization and recognition. Given ground truth localization and recognition targets for training, RepPoints learn to automatically arrange themselves in a manner that bounds the spatial extent of an object and indicates semantically significant local areas. They furthermore do not require the use of anchors to sample a space of bounding boxes. We show that an anchor-free object detector based on RepPoints can be as effective as the state-of-the-art anchor-based detection methods, with 46.5 AP and 67.4 AP50 on the COCO test-dev detection benchmark, using ResNet-101 model. + + +
+ +
+ + + + +## Introduction + By [Ze Yang](https://yangze.tech/), [Shaohui Liu](http://b1ueber2y.me/), and [Han Hu](https://ancientmooner.github.io/). We provide code support and configuration files to reproduce the results in the paper for ["RepPoints: Point Set Representation for Object Detection"](https://arxiv.org/abs/1904.11490) on COCO object detection. -## Introduction - **RepPoints**, initially described in [arXiv](https://arxiv.org/abs/1904.11490), is a new representation method for visual objects, on which visual understanding tasks are typically centered. Visual object representation, aiming at both geometric description and appearance feature extraction, is conventionally achieved by `bounding box + RoIPool (RoIAlign)`. The bounding box representation is convenient to use; however, it provides only a rectangular localization of objects that lacks geometric precision and may consequently degrade feature quality. Our new representation, RepPoints, models objects by a `point set` instead of a `bounding box`, which learns to adaptively position themselves over an object in a manner that circumscribes the object’s `spatial extent` and enables `semantically aligned feature extraction`. This richer and more flexible representation maintains the convenience of bounding boxes while facilitating various visual understanding applications. This repo demonstrates the effectiveness of RepPoints for COCO object detection. Another feature of this repo is the demonstration of an `anchor-free detector`, which can be as effective as state-of-the-art anchor-based detection methods. The anchor-free detector can utilize either `bounding box` or `RepPoints` as the basic object representation.
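To supervise and evaluate with box annotations, the method converts a learned point set into a pseudo box; one of the conversion functions described in the paper is a simple min–max over the points. A minimal sketch, assuming an (x1, y1, x2, y2, ...) point layout (the layout is an assumption, not mmdet's internal convention):

```python
import torch

def points_to_minmax_bbox(points: torch.Tensor) -> torch.Tensor:
    """Convert RepPoints-style point sets to pseudo boxes via min-max.

    points: (N, 2 * num_points) tensor laid out as (x1, y1, x2, y2, ...).
    Returns (N, 4) boxes as (x_min, y_min, x_max, y_max).
    """
    pts = points.view(points.size(0), -1, 2)
    xy_min = pts.min(dim=1).values
    xy_max = pts.max(dim=1).values
    return torch.cat([xy_min, xy_max], dim=1)

boxes = points_to_minmax_bbox(torch.rand(8, 18) * 100)  # 9 points per object
```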
- -

Learning RepPoints in Object Detection.

-
- -## Citing RepPoints +## Citation ``` @inproceedings{yang2019reppoints, diff --git a/configs/res2net/README.md b/configs/res2net/README.md index 864a891..3a76bef 100644 --- a/configs/res2net/README.md +++ b/configs/res2net/README.md @@ -1,4 +1,18 @@ -# Res2Net for object detection and instance segmentation +# Res2Net: A New Multi-scale Backbone Architecture + +## Abstract + + + +Representing features at multiple scales is of great importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods. + + +
+ +
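As a rough illustration of the "hierarchical residual-like connections within one single residual block" described above, the sketch below implements only the multi-scale 3x3 stage of a bottleneck (no SE module, stride, or downsampling); it is a simplified reimplementation for intuition, not the mmdet Res2Net backbone.

```python
import torch
import torch.nn as nn

class Res2NetSplitConv(nn.Module):
    """Simplified multi-scale 3x3 stage of a Res2Net bottleneck.

    The input (already reduced by a 1x1 conv) is split into `scales` groups;
    the first group passes through unchanged, and every later group is
    convolved after adding the previous group's output, enlarging the
    receptive field group by group.
    """

    def __init__(self, channels: int, scales: int = 4):
        super().__init__()
        assert channels % scales == 0
        self.scales = scales
        width = channels // scales
        self.convs = nn.ModuleList(
            nn.Conv2d(width, width, 3, padding=1, bias=False)
            for _ in range(scales - 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        splits = torch.chunk(x, self.scales, dim=1)
        outs = [splits[0]]            # first split: identity
        prev = None
        for i, conv in enumerate(self.convs):
            inp = splits[i + 1] if prev is None else splits[i + 1] + prev
            prev = conv(inp)
            outs.append(prev)
        return torch.cat(outs, dim=1)

y = Res2NetSplitConv(channels=64, scales=4)(torch.randn(2, 64, 56, 56))
```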
+ + + ## Introduction @@ -19,6 +33,8 @@ Compared with other backbone networks, Res2Net requires fewer parameters and FLO - GFLOPs for classification are calculated with image size (224x224). +## Citation + ```latex @article{gao2019res2net, title={Res2Net: A New Multi-scale Backbone Architecture}, diff --git a/configs/resnest/README.md b/configs/resnest/README.md index a23a39a..de80430 100644 --- a/configs/resnest/README.md +++ b/configs/resnest/README.md @@ -1,8 +1,22 @@ # ResNeSt: Split-Attention Networks -## Introduction +## Abstract -[BACKBONE] + + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. + + +
+ +
+ + + + +## Citation + + ```latex @article{zhang2020resnest, diff --git a/configs/resnest/metafile.yml b/configs/resnest/metafile.yml index d7f68e5..3323fad 100644 --- a/configs/resnest/metafile.yml +++ b/configs/resnest/metafile.yml @@ -11,7 +11,7 @@ Collections: Paper: URL: https://arxiv.org/abs/2004.08955 Title: 'ResNeSt: Split-Attention Networks' - README: configs/renest/README.md + README: configs/resnest/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 Version: v2.7.0 diff --git a/configs/retinanet/README.md b/configs/retinanet/README.md index e5f099e..34dd55e 100644 --- a/configs/retinanet/README.md +++ b/configs/retinanet/README.md @@ -1,6 +1,20 @@ # Focal Loss for Dense Object Detection -## Introduction +## Abstract + + + +The highest accuracy object detectors to date are based on a two-stage approach popularized by R-CNN, where a classifier is applied to a sparse set of candidate object locations. In contrast, one-stage detectors that are applied over a regular, dense sampling of possible object locations have the potential to be faster and simpler, but have trailed the accuracy of two-stage detectors thus far. In this paper, we investigate why this is the case. We discover that the extreme foreground-background class imbalance encountered during training of dense detectors is the central cause. We propose to address this class imbalance by reshaping the standard cross entropy loss such that it down-weights the loss assigned to well-classified examples. Our novel Focal Loss focuses training on a sparse set of hard examples and prevents the vast number of easy negatives from overwhelming the detector during training. To evaluate the effectiveness of our loss, we design and train a simple dense detector we call RetinaNet. Our results show that when trained with the focal loss, RetinaNet is able to match the speed of previous one-stage detectors while surpassing the accuracy of all existing state-of-the-art two-stage detectors. + + +
+ +
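The reshaped cross entropy described above is commonly written as FL(p_t) = -alpha_t (1 - p_t)^gamma log(p_t). Below is a minimal sigmoid-based sketch for intuition only; it is not mmdet's `FocalLoss` module and omits the usual normalization by the number of positive anchors.

```python
import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    """FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t), per anchor and class.

    logits, targets: tensors of the same shape; targets are 0/1 floats.
    """
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = p * targets + (1 - p) * (1 - targets)
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()

loss = sigmoid_focal_loss(torch.randn(4, 80), torch.zeros(4, 80))
```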
+ + + + +## Citation @@ -19,6 +33,7 @@ | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :------: | :--------: | | R-50-FPN | caffe | 1x | 3.5 | 18.6 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531_012518.log.json) | | R-50-FPN | pytorch | 1x | 3.8 | 19.0 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) | +| R-50-FPN (FP16) | pytorch | 1x | 2.8 | 31.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) | | R-50-FPN | pytorch | 2x | - | - | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131_114738.log.json) | | R-101-FPN | caffe | 1x | 5.5 | 14.7 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531_012536.log.json) | | R-101-FPN | pytorch | 1x | 5.7 | 15.0 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130_003055.log.json) | diff --git a/configs/retinanet/metafile.yml b/configs/retinanet/metafile.yml index efff090..b04a06c 100644 --- a/configs/retinanet/metafile.yml +++ b/configs/retinanet/metafile.yml @@ -12,7 +12,7 @@ Collections: - ResNet Paper: URL: https://arxiv.org/abs/1708.02002 - Title: 'Focal Loss for Dense Object Detection' + Title: "Focal Loss for Dense Object Detection" README: configs/retinanet/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/retinanet.py#L6 @@ -59,6 +59,30 @@ Models: box AP: 36.5 Weights: 
https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth + - Name: retinanet_r50_fpn_fp16_1x_coco + In Collection: RetinaNet + Config: configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py + Metadata: + Training Memory (GB): 2.8 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + inference time (ms/im): + - value: 31.65 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP16 + resolution: (800, 1333) + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 36.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth + - Name: retinanet_r50_fpn_2x_coco In Collection: RetinaNet Config: configs/retinanet/retinanet_r50_fpn_2x_coco.py @@ -167,7 +191,6 @@ Models: box AP: 41 Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth - - Name: retinanet_x101_32x4d_fpn_1x_coco In Collection: RetinaNet Config: configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py diff --git a/configs/retinanet/retinanet_r50_fpn_90k_coco.py b/configs/retinanet/retinanet_r50_fpn_90k_coco.py new file mode 100644 index 0000000..ceda327 --- /dev/null +++ b/configs/retinanet/retinanet_r50_fpn_90k_coco.py @@ -0,0 +1,15 @@ +_base_ = 'retinanet_r50_fpn_1x_coco.py' + +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[60000, 80000]) + +# Runner type +runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) + +checkpoint_config = dict(interval=10000) +evaluation = dict(interval=10000, metric='bbox') diff --git a/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py b/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py new file mode 100644 index 0000000..6b6cebe --- /dev/null +++ b/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py @@ -0,0 +1,3 @@ +_base_ = './retinanet_r50_fpn_1x_coco.py' +# fp16 settings +fp16 = dict(loss_scale=512.) diff --git a/configs/rpn/README.md b/configs/rpn/README.md index 03b293d..d1bf674 100644 --- a/configs/rpn/README.md +++ b/configs/rpn/README.md @@ -1,6 +1,20 @@ # Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks -## Introduction +## Abstract + + + +State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. 
For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. + + +
+ +
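As a rough sketch of the fully convolutional head described above (per-location objectness scores and box deltas for k anchors), with assumed channel counts and without anchor generation, matching, or box decoding; this is an illustration, not mmdet's `RPNHead`.

```python
import torch
import torch.nn as nn

class TinyRPNHead(nn.Module):
    """Per-location objectness + box-delta predictions for k anchors."""

    def __init__(self, in_channels: int = 256, num_anchors: int = 9):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, 3, padding=1)
        self.objectness = nn.Conv2d(in_channels, num_anchors, 1)       # sigmoid scores
        self.bbox_deltas = nn.Conv2d(in_channels, num_anchors * 4, 1)  # (dx, dy, dw, dh)

    def forward(self, feat: torch.Tensor):
        feat = torch.relu(self.conv(feat))
        return self.objectness(feat), self.bbox_deltas(feat)

scores, deltas = TinyRPNHead()(torch.randn(1, 256, 50, 68))
```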
+ + + + +## Citation diff --git a/configs/sabl/README.md b/configs/sabl/README.md index c3bf169..c090d00 100644 --- a/configs/sabl/README.md +++ b/configs/sabl/README.md @@ -1,6 +1,21 @@ # Side-Aware Boundary Localization for More Precise Object Detection -## Introduction +## Abstract + + + +Current object detection frameworks mainly rely on bounding box regression to localize objects. Despite the remarkable progress in recent years, the precision of bounding box regression remains unsatisfactory, hence limiting performance in object detection. We observe that precise localization requires careful placement of each side of the bounding box. However, the mainstream approach, which focuses on predicting centers and sizes, is not the most effective way to accomplish this task, especially when there exists displacements with large variance between the anchors and the targets. In this paper, we propose an alternative approach, named as Side-Aware Boundary Localization (SABL), where each side of the bounding box is respectively localized with a dedicated network branch. To tackle the difficulty of precise localization in the presence of displacements with large variance, we further propose a two-step localization scheme, which first predicts a range of movement through bucket prediction and then pinpoints the precise position within the predicted bucket. We test the proposed method on both two-stage and single-stage detection frameworks. Replacing the standard bounding box regression branch with the proposed design leads to significant improvements on Faster R-CNN, RetinaNet, and Cascade R-CNN, by 3.0%, 1.7%, and 0.9%, respectively. + + +
+ +
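The two-step scheme described above can be pictured as: classify which coarse bucket a box side falls into, then regress a fine offset inside that bucket. The sketch below decodes one side from hypothetical predictions; the names, shapes, and bucket layout are assumptions for illustration, not mmdet's SABL head API.

```python
import torch

def decode_side(bucket_logits, bucket_offsets, bucket_centers, bucket_width):
    """Two-step side localization: pick the best bucket, then refine inside it.

    bucket_logits, bucket_offsets: (N, num_buckets) hypothetical predictions
    bucket_centers: (num_buckets,) coordinates of bucket centers along the side
    """
    best = bucket_logits.argmax(dim=1)                       # coarse step: bucket choice
    fine = bucket_offsets.gather(1, best[:, None]).squeeze(1)
    return bucket_centers[best] + fine * bucket_width        # refined side coordinate

side_x = decode_side(torch.randn(4, 7), torch.randn(4, 7) * 0.1,
                     torch.linspace(0, 60, 7), bucket_width=10.0)
```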
+ + + + +## Citation diff --git a/configs/scnet/README.md b/configs/scnet/README.md index 3757be1..52f3b49 100644 --- a/configs/scnet/README.md +++ b/configs/scnet/README.md @@ -1,6 +1,20 @@ -# SCNet +# SCNet: Training Inference Sample Consistency for Instance Segmentation -## Introduction +## Abstract + + + +Cascaded architectures have brought significant performance improvement in object detection and instance segmentation. However, there are lingering issues regarding the disparity in the Intersection-over-Union (IoU) distribution of the samples between training and inference. This disparity can potentially exacerbate detection accuracy. This paper proposes an architecture referred to as Sample Consistency Network (SCNet) to ensure that the IoU distribution of the samples at training time is close to that at inference time. Furthermore, SCNet incorporates feature relay and utilizes global contextual information to further reinforce the reciprocal relationships among classifying, detecting, and segmenting sub-tasks. Extensive experiments on the standard COCO dataset reveal the effectiveness of the proposed method over multiple evaluation metrics, including box AP, mask AP, and inference speed. In particular, while running 38\% faster, the proposed SCNet improves the AP of the box and mask predictions by respectively 1.3 and 2.3 points compared to the strong Cascade Mask R-CNN baseline. + + +
+ +
+ + + + +## Citation @@ -40,12 +54,12 @@ The results on COCO 2017val are shown in the below table. (results on test-dev a | Backbone | Style | Lr schd | Mem (GB) | Inf speed (fps) | box AP | mask AP | TTA box AP | TTA mask AP | Config | Download | |:---------------:|:-------:|:-------:|:--------:|:---------------:|:------:|:-------:|:----------:|:-----------:|:------:|:------------:| -| R-50-FPN | pytorch | 1x | 7.0 | 6.2 | 43.5 | 39.2 | 44.8 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py) | [model](https://drive.google.com/file/d/1K5_8-P0EC43WZFtoO3q9_JE-df8pEc7J/view?usp=sharing) \| [log](https://drive.google.com/file/d/1ZFS6QhFfxlOnDYPiGpSDP_Fzgb7iDGN3/view?usp=sharing) | -| R-50-FPN | pytorch | 20e | 7.0 | 6.2 | 44.5 | 40.0 | 45.8 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_20e_coco.py) | [model](https://drive.google.com/file/d/15VGLCt5-IO5TbzB4Kw6ZyoF6QH0Q511A/view?usp=sharing) \| [log](https://drive.google.com/file/d/1-LnkOXN8n5ojQW34H0qZ625cgrnWpqSX/view?usp=sharing) | -| R-101-FPN | pytorch | 20e | 8.9 | 5.8 | 45.8 | 40.9 | 47.3 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r101_fpn_20e_coco.py) | [model](https://drive.google.com/file/d/1aeCGHsOBdfIqVBnBPp0JUE_RSIau3583/view?usp=sharing) \| [log](https://drive.google.com/file/d/1iRx-9GRgTaIDsz-we3DGwFVH22nbvCLa/view?usp=sharing) | -| X-101-64x4d-FPN | pytorch | 20e | 13.2 | 4.9 | 47.5 | 42.3 | 48.9 | 44.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py) | [model](https://drive.google.com/file/d/1YjgutUKz4TTPpqSWGKUTkZJ8_X-kyCfY/view?usp=sharing) \| [log](https://drive.google.com/file/d/1OsfQJ8gwtqIQ61k358yxY21sCvbUcRjs/view?usp=sharing) | +| R-50-FPN | pytorch | 1x | 7.0 | 6.2 | 43.5 | 39.2 | 44.8 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco_20210117_192725.log.json) | +| R-50-FPN | pytorch | 20e | 7.0 | 6.2 | 44.5 | 40.0 | 45.8 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco_20210116_060148.log.json) | +| R-101-FPN | pytorch | 20e | 8.9 | 5.8 | 45.8 | 40.9 | 47.3 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco_20210118_175824.log.json) | +| X-101-64x4d-FPN | pytorch | 20e | 13.2 | 4.9 | 47.5 | 42.3 | 48.9 | 44.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth) | 
[log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco_20210120_045959.log.json) | ### Notes - Training hyper-parameters are identical to those of [HTC](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc). -- TTA means Test Time Augmentation, which applies horizonal flip and multi-scale testing. Refer to [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py). +- TTA means Test Time Augmentation, which applies horizontal flip and multi-scale testing. Refer to [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py). diff --git a/configs/scnet/metafile.yml b/configs/scnet/metafile.yml index bcdf7d7..15eaebf 100644 --- a/configs/scnet/metafile.yml +++ b/configs/scnet/metafile.yml @@ -41,7 +41,7 @@ Models: Dataset: COCO Metrics: mask AP: 39.2 - Weights: https://drive.google.com/file/d/1K5_8-P0EC43WZFtoO3q9_JE-df8pEc7J/view?usp=sharing + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth - Name: scnet_r50_fpn_20e_coco In Collection: SCNet @@ -65,7 +65,7 @@ Models: Dataset: COCO Metrics: mask AP: 40.0 - Weights: https://drive.google.com/file/d/15VGLCt5-IO5TbzB4Kw6ZyoF6QH0Q511A/view?usp=sharing + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth - Name: scnet_r101_fpn_20e_coco In Collection: SCNet @@ -89,7 +89,7 @@ Models: Dataset: COCO Metrics: mask AP: 40.9 - Weights: https://drive.google.com/file/d/1aeCGHsOBdfIqVBnBPp0JUE_RSIau3583/view?usp=sharing + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth - Name: scnet_x101_64x4d_fpn_20e_coco In Collection: SCNet @@ -113,4 +113,4 @@ Models: Dataset: COCO Metrics: mask AP: 42.3 - Weights: https://drive.google.com/file/d/1YjgutUKz4TTPpqSWGKUTkZJ8_X-kyCfY/view?usp=sharing + Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth diff --git a/configs/scratch/README.md b/configs/scratch/README.md index 6b30447..68d3a72 100644 --- a/configs/scratch/README.md +++ b/configs/scratch/README.md @@ -1,6 +1,20 @@ # Rethinking ImageNet Pre-training -## Introduction +## Abstract + + + +We report competitive results on object detection and instance segmentation on the COCO dataset using standard models trained from random initialization. The results are no worse than their ImageNet pre-training counterparts even when using the hyper-parameters of the baseline system (Mask R-CNN) that were optimized for fine-tuning pre-trained models, with the sole exception of increasing the number of training iterations so the randomly initialized models may converge. Training from random initialization is surprisingly robust; our results hold even when: (i) using only 10% of the training data, (ii) for deeper and wider models, and (iii) for multiple tasks and metrics. Experiments show that ImageNet pre-training speeds up convergence early in training, but does not necessarily provide regularization or improve final target task accuracy. To push the envelope we demonstrate 50.9 AP on COCO object detection without using any external data---a result on par with the top COCO 2017 competition results that used ImageNet pre-training. 
These observations challenge the conventional wisdom of ImageNet pre-training for dependent tasks and we expect these discoveries will encourage people to rethink the current de facto paradigm of `pre-training and fine-tuning' in computer vision. + + +
+ +
+ + + + +## Citation diff --git a/configs/seesaw_loss/README.md b/configs/seesaw_loss/README.md index 47cbff8..9c1fcb0 100644 --- a/configs/seesaw_loss/README.md +++ b/configs/seesaw_loss/README.md @@ -1,6 +1,20 @@ -# Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) +# Seesaw Loss for Long-Tailed Instance Segmentation -## Introduction +## Abstract + + + +Instance segmentation has witnessed a remarkable progress on class-balanced benchmarks. However, they fail to perform as accurately in real-world scenarios, where the category distribution of objects naturally comes with a long tail. Instances of head classes dominate a long-tailed dataset and they serve as negative samples of tail categories. The overwhelming gradients of negative samples on tail classes lead to a biased learning process for classifiers. Consequently, objects of tail categories are more likely to be misclassified as backgrounds or head categories. To tackle this problem, we propose Seesaw Loss to dynamically re-balance gradients of positive and negative samples for each category, with two complementary factors, i.e., mitigation factor and compensation factor. The mitigation factor reduces punishments to tail categories w.r.t. the ratio of cumulative training instances between different categories. Meanwhile, the compensation factor increases the penalty of misclassified instances to avoid false positives of tail categories. We conduct extensive experiments on Seesaw Loss with mainstream frameworks and different data sampling strategies. With a simple end-to-end training pipeline, Seesaw Loss obtains significant gains over Cross-Entropy Loss, and achieves state-of-the-art performance on LVIS dataset without bells and whistles. + + +
+ +
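A rough sketch of the two factors named above, using assumed notation (per-category cumulative instance counts and predicted probabilities) and assumed exponents p and q; this is for intuition only and is not mmdet's `SeesawLoss` implementation.

```python
import torch

def seesaw_factors(class_counts, probs, gt_label, p=0.8, q=2.0):
    """Per-negative-class weights S_ij = M_ij * C_ij for one sample of class i.

    class_counts: (C,) cumulative training instances per category
    probs: (C,) predicted class probabilities for this sample
    """
    i = gt_label
    ratio_n = class_counts.float() / class_counts[i].float()
    # mitigation: reduce punishment on rarer (tail) negative classes
    mitigation = torch.where(ratio_n < 1, ratio_n ** p, torch.ones_like(ratio_n))
    ratio_p = probs / probs[i].clamp(min=1e-12)
    # compensation: increase penalty where a negative class is over-confident
    compensation = torch.where(ratio_p > 1, ratio_p ** q, torch.ones_like(ratio_p))
    weights = mitigation * compensation
    weights[i] = 1.0  # the ground-truth class itself is not re-weighted
    return weights

w = seesaw_factors(torch.tensor([1000, 50, 10]), torch.tensor([0.2, 0.5, 0.3]), gt_label=1)
```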
+ + + + +## Citation diff --git a/configs/seesaw_loss/metafile.yml b/configs/seesaw_loss/metafile.yml new file mode 100644 index 0000000..70dd2fe --- /dev/null +++ b/configs/seesaw_loss/metafile.yml @@ -0,0 +1,203 @@ +Collections: + - Name: Seesaw Loss + Metadata: + Training Data: LVIS + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - Softmax + - RPN + - Convolution + - Dense Connections + - FPN + - ResNet + - RoIAlign + - Seesaw Loss + Paper: + URL: https://arxiv.org/abs/2008.10032 + Title: 'Seesaw Loss for Long-Tailed Instance Segmentation' + README: configs/seesaw_loss/README.md + +Models: + - Name: mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 25.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth + - Name: mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 25.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 25.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth + - Name: mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.4 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth + - Name: mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.2 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 27.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth + - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 27.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth + - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection 
+ Dataset: LVIS v1 + Metrics: + box AP: 27.6 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 26.8 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth + - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 28.9 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 27.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth + - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 28.9 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 28.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth + - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 33.1 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 29.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth + - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 33.0 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 30.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth + - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 30.0 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 29.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth + - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 + In Collection: Seesaw Loss + Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py + Metadata: + Epochs: 24 + Results: + - Task: Object Detection + Dataset: LVIS v1 + Metrics: + box AP: 32.8 + - Task: Instance Segmentation + Dataset: LVIS v1 + Metrics: + mask AP: 30.1 + Weights: 
https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth diff --git a/configs/selfsup_pretrain/README.md index ad5c69f..2c61bd6 100644 --- a/configs/selfsup_pretrain/README.md +++ b/configs/selfsup_pretrain/README.md @@ -1,6 +1,27 @@ # Backbones Trained by Self-Supervised Algorithms -## Introduction +## Abstract + + + +Unsupervised image representations have significantly reduced the gap with supervised pretraining, notably with the recent achievements of contrastive learning methods. These contrastive methods typically work online and rely on a large number of explicit pairwise feature comparisons, which is computationally challenging. In this paper, we propose an online algorithm, SwAV, that takes advantage of contrastive methods without requiring to compute pairwise comparisons. Specifically, our method simultaneously clusters the data while enforcing consistency between cluster assignments produced for different augmentations (or views) of the same image, instead of comparing features directly as in contrastive learning. Simply put, we use a swapped prediction mechanism where we predict the cluster assignment of a view from the representation of another view. Our method can be trained with large and small batches and can scale to unlimited amounts of data. Compared to previous contrastive methods, our method is more memory efficient since it does not require a large memory bank or a special momentum network. In addition, we also propose a new data augmentation strategy, multi-crop, that uses a mix of views with different resolutions in place of two full-resolution views, without increasing the memory or compute requirements much. We validate our findings by achieving 75.3% top-1 accuracy on ImageNet with ResNet-50, as well as surpassing supervised pretraining on all the considered transfer tasks. + + +
+ +
+ +We present Momentum Contrast (MoCo) for unsupervised visual representation learning. From a perspective on contrastive learning as dictionary look-up, we build a dynamic dictionary with a queue and a moving-averaged encoder. This enables building a large and consistent dictionary on-the-fly that facilitates contrastive unsupervised learning. MoCo provides competitive results under the common linear protocol on ImageNet classification. More importantly, the representations learned by MoCo transfer well to downstream tasks. MoCo can outperform its supervised pre-training counterpart in 7 detection/segmentation tasks on PASCAL VOC, COCO, and other datasets, sometimes surpassing it by large margins. This suggests that the gap between unsupervised and supervised representation learning has been largely closed in many vision tasks. + + +
+ +
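In practice, the configs in this folder plug such a self-supervised backbone into a standard detector by pointing the backbone's `init_cfg` at a converted checkpoint. A hedged sketch of what such an override looks like; the checkpoint path is a placeholder (the weights are assumed to have been converted to a torchvision-style ResNet state dict beforehand), and the exact normalization and freezing overrides may differ from the configs shipped here.

```python
# Hypothetical config sketch: Mask R-CNN with a self-supervised ResNet-50 backbone.
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'

model = dict(
    backbone=dict(
        frozen_stages=0,                                    # train all stages
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='./checkpoints/mocov2_r50_converted.pth')))  # placeholder path
```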
+ + + + +## Citation diff --git a/configs/solo/README.md new file mode 100644 index 0000000..76664b7 --- /dev/null +++ b/configs/solo/README.md @@ -0,0 +1,56 @@ +# SOLO: Segmenting Objects by Locations + +## Abstract + + + +We present a new, embarrassingly simple approach to instance segmentation in images. Compared to many other dense prediction tasks, e.g., semantic segmentation, it is the arbitrary number of instances that have made instance segmentation much more challenging. In order to predict a mask for each instance, mainstream approaches either follow the 'detect-then-segment' strategy as used by Mask R-CNN, or predict category masks first then use clustering techniques to group pixels into individual instances. We view the task of instance segmentation from a completely new perspective by introducing the notion of "instance categories", which assigns categories to each pixel within an instance according to the instance's location and size, thus nicely converting instance mask segmentation into a classification-solvable problem. Now instance segmentation is decomposed into two classification tasks. We demonstrate a much simpler and flexible instance segmentation framework with strong performance, achieving on par accuracy with Mask R-CNN and outperforming recent single-shot instance segmenters in accuracy. We hope that this very simple and strong framework can serve as a baseline for many instance-level recognition tasks besides instance segmentation. + + +
+ +
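The "instance categories" idea above assigns an instance's category to cells of an S x S grid according to its center location (and, via FPN levels, its size). Below is a toy sketch of the center-to-cell mapping only; the full SOLO head assigns a small center region to several neighbouring cells and handles multiple grid sizes, which is omitted here.

```python
def solo_grid_cell(cx, cy, img_w, img_h, num_grid):
    """Map an instance center (cx, cy) in pixels to the (row, col) cell of an
    S x S grid, which is where SOLO places that instance's category label."""
    col = min(int(cx / img_w * num_grid), num_grid - 1)
    row = min(int(cy / img_h * num_grid), num_grid - 1)
    return row, col

# An instance centered at (400, 300) in a 1333x800 image on the S=40 grid:
print(solo_grid_cell(400, 300, img_w=1333, img_h=800, num_grid=40))  # (15, 12)
```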
+ + + + +## Citation + +``` +@inproceedings{wang2020solo, + title = {{SOLO}: Segmenting Objects by Locations}, + author = {Wang, Xinlong and Kong, Tao and Shen, Chunhua and Jiang, Yuning and Li, Lei}, + booktitle = {Proc. Eur. Conf. Computer Vision (ECCV)}, + year = {2020} +} +``` + +## Results and Models + +### SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +|:---------:|:-------:|:--------:|:-------:|:--------:|:--------------:|:------:|:--------:| +| R-50 | pytorch | N | 1x | 8.0 | 14.0 | 33.1 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055.log.json) | +| R-50 | pytorch | Y | 3x | 7.4 | 14.0 | 35.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353.log.json) | + +### Decoupled SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +|:---------:|:-------:|:--------:|:-------:|:--------:|:--------------:|:-------:|:--------:| +| R-50 | pytorch | N | 1x | 7.8 | 12.5 | 33.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348.log.json) | +| R-50 | pytorch | Y | 3x | 7.9 | 12.5 | 36.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504.log.json) | + +- Decoupled SOLO has a decoupled head which is different from SOLO head. +Decoupled SOLO serves as an efficient and equivalent variant in accuracy +of SOLO. Please refer to the corresponding config files for details. + +### Decoupled Light SOLO + +| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | +|:---------:|:-------:|:--------:|:-------:|:--------:|:--------------:|:------:|:--------:| +| R-50 | pytorch | Y | 3x | 2.2 | 31.2 | 32.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703.log.json) | + +- Decoupled Light SOLO using decoupled structure similar to Decoupled +SOLO head, with light-weight head and smaller input size, Please refer +to the corresponding config files for details. 
diff --git a/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py b/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py new file mode 100644 index 0000000..101f8f1 --- /dev/null +++ b/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py @@ -0,0 +1,63 @@ +_base_ = './decoupled_solo_r50_fpn_3x_coco.py' + +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOLightHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384), + (852, 352)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(852, 512), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/solo/decoupled_solo_r50_fpn_1x_coco.py b/configs/solo/decoupled_solo_r50_fpn_1x_coco.py new file mode 100644 index 0000000..b611cdf --- /dev/null +++ b/configs/solo/decoupled_solo_r50_fpn_1x_coco.py @@ -0,0 +1,28 @@ +_base_ = [ + './solo_r50_fpn_1x_coco.py', +] +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) + +optimizer = dict(type='SGD', lr=0.01) diff --git a/configs/solo/decoupled_solo_r50_fpn_3x_coco.py b/configs/solo/decoupled_solo_r50_fpn_3x_coco.py new file mode 100644 index 0000000..4a8c19d --- /dev/null +++ b/configs/solo/decoupled_solo_r50_fpn_3x_coco.py @@ -0,0 +1,25 @@ +_base_ = './solo_r50_fpn_3x_coco.py' + +# model settings +model = dict( + mask_head=dict( + type='DecoupledSOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + 
loss_mask=dict( + type='DiceLoss', use_sigmoid=True, activate=False, + loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) diff --git a/configs/solo/metafile.yml b/configs/solo/metafile.yml new file mode 100644 index 0000000..b6244e8 --- /dev/null +++ b/configs/solo/metafile.yml @@ -0,0 +1,115 @@ +Collections: + - Name: SOLO + Metadata: + Training Data: COCO + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - FPN + - Convolution + - ResNet + Paper: https://arxiv.org/abs/1912.04488 + README: configs/solo/README.md + +Models: + - Name: decoupled_solo_r50_fpn_1x_coco + In Collection: SOLO + Config: configs/solo/decoupled_solo_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 12 + inference time (ms/im): + - value: 116.4 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth + + - Name: decoupled_solo_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/decoupled_solo_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 7.9 + Epochs: 36 + inference time (ms/im): + - value: 117.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 36.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth + + - Name: decoupled_solo_light_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 2.2 + Epochs: 36 + inference time (ms/im): + - value: 35.0 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (852, 512) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 32.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth + + - Name: solo_r50_fpn_3x_coco + In Collection: SOLO + Config: configs/solo/solo_r50_fpn_3x_coco.py + Metadata: + Training Memory (GB): 7.4 + Epochs: 36 + inference time (ms/im): + - value: 94.2 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 35.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth + + - Name: solo_r50_fpn_1x_coco + In Collection: SOLO + Config: configs/solo/solo_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 8.0 + Epochs: 12 + inference time (ms/im): + - value: 95.1 + hardware: V100 + backend: PyTorch + batch size: 1 + mode: FP32 + resolution: (1333, 800) + Results: + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 33.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth diff --git a/configs/solo/solo_r50_fpn_1x_coco.py b/configs/solo/solo_r50_fpn_1x_coco.py new file mode 100644 index 0000000..9093a50 --- 
/dev/null +++ b/configs/solo/solo_r50_fpn_1x_coco.py @@ -0,0 +1,53 @@ +_base_ = [ + '../_base_/datasets/coco_instance.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='SOLO', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=0, + num_outs=5), + mask_head=dict( + type='SOLOHead', + num_classes=80, + in_channels=256, + stacked_convs=7, + feat_channels=256, + strides=[8, 8, 16, 32, 32], + scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), + pos_scale=0.2, + num_grids=[40, 36, 24, 16, 12], + cls_down_index=0, + loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), + # model training and testing settings + test_cfg=dict( + nms_pre=500, + score_thr=0.1, + mask_thr=0.5, + filter_thr=0.05, + kernel='gaussian', # gaussian/linear + sigma=2.0, + max_per_img=100)) + +# optimizer +optimizer = dict(type='SGD', lr=0.01) diff --git a/configs/solo/solo_r50_fpn_3x_coco.py b/configs/solo/solo_r50_fpn_3x_coco.py new file mode 100644 index 0000000..52302cd --- /dev/null +++ b/configs/solo/solo_r50_fpn_3x_coco.py @@ -0,0 +1,28 @@ +_base_ = './solo_r50_fpn_1x_coco.py' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), + (1333, 672), (1333, 640)], + multiscale_mode='value', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +data = dict(train=dict(pipeline=train_pipeline)) + +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + step=[27, 33]) +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/configs/sparse_rcnn/README.md b/configs/sparse_rcnn/README.md index bd5f157..48c8aef 100644 --- a/configs/sparse_rcnn/README.md +++ b/configs/sparse_rcnn/README.md @@ -1,6 +1,20 @@ # Sparse R-CNN: End-to-End Object Detection with Learnable Proposals -## Introduction +## Abstract + + + +We present Sparse R-CNN, a purely sparse method for object detection in images. Existing works on object detection heavily rely on dense object candidates, such as k anchor boxes pre-defined on all grids of image feature map of size H×W. In our method, however, a fixed sparse set of learned object proposals, total length of N, are provided to object recognition head to perform classification and location. By eliminating HWk (up to hundreds of thousands) hand-designed object candidates to N (e.g. 100) learnable proposals, Sparse R-CNN completely avoids all efforts related to object candidates design and many-to-one label assignment. More importantly, final predictions are directly output without non-maximum suppression post-procedure. 
Sparse R-CNN demonstrates accuracy, run-time and training convergence performance on par with the well-established detector baselines on the challenging COCO dataset, e.g., achieving 45.0 AP in standard 3× training schedule and running at 22 fps using ResNet-50 FPN model. We hope our work could inspire re-thinking the convention of dense prior in object detectors. + + +
+ +
+ + + + +## Citation diff --git a/configs/ssd/README.md b/configs/ssd/README.md index fea4f7b..f668cd5 100644 --- a/configs/ssd/README.md +++ b/configs/ssd/README.md @@ -1,6 +1,20 @@ # SSD: Single Shot MultiBox Detector -## Introduction +## Abstract + + + +We present a method for detecting objects in images using a single deep neural network. Our approach, named SSD, discretizes the output space of bounding boxes into a set of default boxes over different aspect ratios and scales per feature map location. At prediction time, the network generates scores for the presence of each object category in each default box and produces adjustments to the box to better match the object shape. Additionally, the network combines predictions from multiple feature maps with different resolutions to naturally handle objects of various sizes. Our SSD model is simple relative to methods that require object proposals because it completely eliminates proposal generation and subsequent pixel or feature resampling stage and encapsulates all computation in a single network. This makes SSD easy to train and straightforward to integrate into systems that require a detection component. Experimental results on the PASCAL VOC, MS COCO, and ILSVRC datasets confirm that SSD has comparable accuracy to methods that utilize an additional object proposal step and is much faster, while providing a unified framework for both training and inference. Compared to other single stage methods, SSD has much better accuracy, even with a smaller input image size. For 300×300 input, SSD achieves 72.1% mAP on VOC2007 test at 58 FPS on a Nvidia Titan X and for 500×500 input, SSD achieves 75.1% mAP, outperforming a comparable state of the art Faster R-CNN model. + + +
+ +
+ + + + +## Citation diff --git a/configs/ssd/ssd300_coco.py b/configs/ssd/ssd300_coco.py index 1098187..2884658 100644 --- a/configs/ssd/ssd300_coco.py +++ b/configs/ssd/ssd300_coco.py @@ -7,14 +7,8 @@ data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -25,8 +19,14 @@ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] diff --git a/configs/ssd/ssd512_coco.py b/configs/ssd/ssd512_coco.py index 3d5f346..82271c6 100644 --- a/configs/ssd/ssd512_coco.py +++ b/configs/ssd/ssd512_coco.py @@ -20,14 +20,8 @@ data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -38,8 +32,14 @@ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(512, 512), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] diff --git a/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py b/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py index 653fff1..c22d0a7 100644 --- a/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py +++ b/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py @@ -70,14 +70,8 @@ img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -88,8 +82,14 @@ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=320), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), diff --git 
a/configs/strong_baselines/README.md b/configs/strong_baselines/README.md new file mode 100644 index 0000000..5ada104 --- /dev/null +++ b/configs/strong_baselines/README.md @@ -0,0 +1,18 @@ +# Strong Baselines + +We train Mask R-CNN with large-scale jitter and longer schedule as strong baselines. +The modifications follow those in [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/configs/new_baselines). + +## Results and models + +| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: | +| R-50-FPN | pytorch | 50e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py) | [model]() | [log]() | +| R-50-FPN | pytorch | 100e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model]() | [log]() | +| R-50-FPN | caffe | 100e | | | 44.7 | 40.4 | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model]() | [log]() | +| R-50-FPN | caffe | 400e | | | | | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py) | [model]() | [log]() | + +## Notice + +When using large-scale jittering, there are sometimes empty proposals in the box and mask heads during training. +This requires MMSyncBN that allows empty tensors. Therefore, please use mmcv-full>=1.3.14 to train models supported in this directory. diff --git a/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py b/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py new file mode 100644 index 0000000..a40d6a0 --- /dev/null +++ b/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py @@ -0,0 +1,80 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../common/lsj_100e_coco_instance.py' +] + +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. 
+head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + backbone=dict( + frozen_stages=-1, + norm_eval=False, + norm_cfg=norm_cfg, + init_cfg=None, + style='caffe'), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) + +file_client_args = dict(backend='disk') +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +image_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=image_size, + ratio_range=(0.1, 2.0), + multiscale_mode='range', + keep_ratio=True), + dict( + type='RandomCrop', + crop_type='absolute_range', + crop_size=image_size, + recompute_bbox=True, + allow_negative_crop=True), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=image_size), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile', file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] + +# Use RepeatDataset to speed up training +data = dict( + train=dict(dataset=dict(pipeline=train_pipeline)), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py b/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py new file mode 100644 index 0000000..31824eb --- /dev/null +++ b/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py @@ -0,0 +1,2 @@ +_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' +fp16 = dict(loss_scale=512.) 
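For orientation, the 50e and 400e variants that follow differ from this 100e baseline only in the `RepeatDataset` repeat factor (and, for 400e, the warmup length). A hypothetical intermediate schedule would be derived the same way; the sketch below is illustrative only and not part of this patch.

```python
# Hypothetical 200-epoch LSJ baseline (illustrative, not a file in this patch),
# derived in the same way as the 50e/400e variants below.
_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'

# The 100e schedule uses RepeatDataset with times=4, so doubling the repeat
# factor doubles the effective training length.
data = dict(train=dict(times=4 * 2))
# Scale the linear warmup with the schedule, following the 400e variant.
lr_config = dict(warmup_iters=500 * 2)
```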
diff --git a/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py b/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py new file mode 100644 index 0000000..1211925 --- /dev/null +++ b/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py @@ -0,0 +1,6 @@ +_base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' + +# Use RepeatDataset to speed up training +# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs) +data = dict(train=dict(times=4 * 4)) +lr_config = dict(warmup_iters=500 * 4) diff --git a/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py b/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py new file mode 100644 index 0000000..4a15d69 --- /dev/null +++ b/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/mask_rcnn_r50_fpn.py', + '../common/lsj_100e_coco_instance.py' +] + +norm_cfg = dict(type='SyncBN', requires_grad=True) +# Use MMSyncBN that handles empty tensor in head. It can be changed to +# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed +# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. +head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) +model = dict( + # the model is trained from scratch, so init_cfg is None + backbone=dict( + frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), + neck=dict(norm_cfg=norm_cfg), + rpn_head=dict(num_convs=2), # leads to 0.1+ mAP + roi_head=dict( + bbox_head=dict( + type='Shared4Conv1FCBBoxHead', + conv_out_channels=256, + norm_cfg=head_norm_cfg), + mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py b/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py new file mode 100644 index 0000000..7b97960 --- /dev/null +++ b/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py @@ -0,0 +1,3 @@ +_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' +# use FP16 +fp16 = dict(loss_scale=512.) diff --git a/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py b/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py new file mode 100644 index 0000000..922579a --- /dev/null +++ b/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py @@ -0,0 +1,5 @@ +_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' + +# Use RepeatDataset to speed up training +# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) +data = dict(train=dict(times=2)) diff --git a/configs/swin/README.md b/configs/swin/README.md new file mode 100644 index 0000000..9a4ef02 --- /dev/null +++ b/configs/swin/README.md @@ -0,0 +1,44 @@ +# Swin Transformer: Hierarchical Vision Transformer using Shifted Windows + +## Abstract + + + +This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. 
To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. + + +
+ +
+ + + + +## Citation + + + +```latex +@article{liu2021Swin, + title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + journal={arXiv preprint arXiv:2103.14030}, + year={2021} +} +``` + +## Results and models + +### Mask R-CNN + +| Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | +| :------: | :---------: | :-----: | :-------------------:| :------: |:------: | :------------: | :----: | :-----: | :------: | :--------: | +| Swin-T | ImageNet-1K | 1x | no | no | 7.6 | | 42.7 | 39.3 | [config](./mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937.log.json) | +| Swin-T | ImageNet-1K | 3x | yes | no | 10.2 | | 46.0 | 41.6 | [config](./mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725.log.json) | +| Swin-T | ImageNet-1K | 3x | yes | yes | 7.8 | | 46.0 | 41.7 | [config](./mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py)| [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006.log.json) | +| Swin-S | ImageNet-1K | 3x | yes | yes | 11.9 | | 48.2 | 43.2 | [config](./mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py)| [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808.log.json) | + +### Notice +Please follow the example of `retinanet_swin-t-p4-w7_fpn_1x_coco.py` when combining Swin Transformer with a one-stage detector. +Because Swin Transformer applies a layer norm to each of its outputs, `start_level` in FPN must be set to 0, and the `out_indices` of the backbone must therefore be set to `[1, 2, 3]` so that only the feature levels actually consumed by FPN are produced.
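For reference, the key overrides that the notice describes are sketched below. This is an abridged view of `configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py`, which is added later in this patch; the pretrained `init_cfg` and the remaining backbone hyper-parameters are omitted here for brevity, so their defaults would apply.

```python
# Abridged sketch of combining Swin-T with RetinaNet; see
# configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py in this patch for the
# complete version.
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,  # drop the ResNet-50 settings from the base config
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        # Emit only the stages FPN consumes; each Swin output has its own
        # layer norm, so unused indices would leave unused parameters.
        out_indices=(1, 2, 3)),
    # start_level=0 because the backbone already outputs exactly the levels
    # FPN needs; in_channels match Swin-T stages 1-3.
    neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
```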
diff --git a/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py b/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py index 237a169..15d50a0 100644 --- a/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py +++ b/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py @@ -1,5 +1,6 @@ _base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py' -pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth' # noqa +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa model = dict( - backbone=dict(depths=[2, 2, 18, 2]), - init_cfg=dict(type='Pretrained', checkpoint=pretrained)) + backbone=dict( + depths=[2, 2, 18, 2], + init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py b/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py index 7f51ca4..337e858 100644 --- a/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py +++ b/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py @@ -3,7 +3,7 @@ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] -pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth' # noqa +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', backbone=dict( @@ -22,6 +22,7 @@ patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, + convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) diff --git a/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py b/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py index 9124b18..2612f6e 100644 --- a/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py +++ b/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py @@ -4,7 +4,7 @@ '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] -pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth' # noqa +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', @@ -24,6 +24,7 @@ patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, + convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) diff --git a/configs/swin/metafile.yml b/configs/swin/metafile.yml new file mode 100644 index 0000000..b265afe --- /dev/null +++ b/configs/swin/metafile.yml @@ -0,0 +1,85 @@ +Collections: + - Name: Swin Transformer + Metadata: + Training Data: COCO + Training Techniques: + - AdamW + Training Resources: 8x V100 GPUs + Architecture: + - Swin Transformer + Paper: + URL: https://arxiv.org/abs/2107.08430 + Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' + README: configs/swin/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 + Version: v2.16.0 + +Models: + - Name: mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco + In Collection: Swin Transformer + Config: configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 11.9 
+ Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 48.2 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 43.2 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth + + - Name: mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco + In Collection: Swin Transformer + Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 10.2 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth + + - Name: mask_rcnn_swin-t-p4-w7_fpn_1x_coco + In Collection: Swin Transformer + Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.7 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 39.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth + + - Name: mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco + In Collection: Swin Transformer + Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py + Metadata: + Training Memory (GB): 7.8 + Epochs: 36 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.0 + - Task: Instance Segmentation + Dataset: COCO + Metrics: + mask AP: 41.7 + Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth diff --git a/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py b/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..9d620fd --- /dev/null +++ b/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5)) + +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/configs/tood/README.md b/configs/tood/README.md new file mode 100644 index 0000000..b1522e7 --- /dev/null +++ b/configs/tood/README.md @@ -0,0 +1,44 @@ +# TOOD: Task-aligned One-stage Object Detection + +## Abstract + + + +One-stage object detection is commonly implemented by optimizing two sub-tasks: object classification and localization, using heads with two parallel 
branches, which might lead to a certain level of spatial misalignment in predictions between the two tasks. In this work, we propose a Task-aligned One-stage Object Detection (TOOD) that explicitly aligns the two tasks in a learning-based manner. First, we design a novel Task-aligned Head (T-Head) which offers a better balance between learning task-interactive and task-specific features, as well as a greater flexibility to learn the alignment via a task-aligned predictor. Second, we propose Task Alignment Learning (TAL) to explicitly pull closer (or even unify) the optimal anchors for the two tasks during training via a designed sample assignment scheme and a task-aligned loss. Extensive experiments are conducted on MS-COCO, where TOOD achieves a 51.1 AP at single-model single-scale testing. This surpasses the recent one-stage detectors by a large margin, such as ATSS (47.7 AP), GFL (48.2 AP), and PAA (49.0 AP), with fewer parameters and FLOPs. Qualitative results also demonstrate the effectiveness of TOOD for better aligning the tasks of object classification and localization. + + +
+ +
+ + + + +## Citation + + + +```latex +@inproceedings{feng2021tood, + title={TOOD: Task-aligned One-stage Object Detection}, + author={Feng, Chengjian and Zhong, Yujie and Gao, Yu and Scott, Matthew R and Huang, Weilin}, + booktitle={ICCV}, + year={2021} +} +``` + +## Results and Models + +| Backbone | Style | Anchor Type | Lr schd | Multi-scale Training| Mem (GB)| Inf time (fps) | box AP | Config | Download | +|:-----------------:|:-------:|:------------:|:-------:|:-------------------:|:-------:|:--------------:|:------:|:------:|:--------:| +| R-50 | pytorch | Anchor-free | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425.log) | +| R-50 | pytorch | Anchor-based | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_anchor_based_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105.log) | +| R-50 | pytorch | Anchor-free | 2x | Y | 4.1 | | 44.5 | [config](./tood_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231.log) | +| R-101 | pytorch | Anchor-free | 2x | Y | 6.0 | | 46.1 | [config](./tood_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232.log) | +| R-101-dcnv2 | pytorch | Anchor-free | 2x | Y | 6.2 | | 49.3 | [config](./tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728.log) | +| X-101-64x4d | pytorch | Anchor-free | 2x | Y | 10.2 | | 47.6 | [config](./tood_x101_64x4d_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519.log) | +| X-101-64x4d-dcnv2 | pytorch | Anchor-free | 2x | Y | | | | [config](./tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model]() | [log]() | + +[1] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ +[2] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ +[3] *`dcnv2` denotes deformable convolutional networks v2.* \ diff --git a/configs/tood/metafile.yml b/configs/tood/metafile.yml new file mode 100644 
index 0000000..27a0f8d --- /dev/null +++ b/configs/tood/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: TOOD + Metadata: + Training Data: COCO + Training Techniques: + - SGD + Training Resources: 8x V100 GPUs + Architecture: + - TOOD + Paper: + URL: https://arxiv.org/abs/2108.07755 + Title: 'TOOD: Task-aligned One-stage Object Detection' + README: configs/tood/README.md + Code: + URL: https://github.com/open-mmlab/mmdetection/blob/v2.20.0/mmdet/models/detectors/tood.py#L7 + Version: v2.20.0 + +Models: + - Name: tood_r101_fpn_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_r101_fpn_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 6.0 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 46.1 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth + + - Name: tood_x101_64x4d_fpn_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 10.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 47.6 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth + + - Name: tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 6.2 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.3 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth + + - Name: tood_r50_fpn_anchor_based_1x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_anchor_based_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth + + - Name: tood_r50_fpn_1x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_1x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 12 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 42.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth + + - Name: tood_r50_fpn_mstrain_2x_coco + In Collection: TOOD + Config: configs/tood/tood_r50_fpn_mstrain_2x_coco.py + Metadata: + Training Memory (GB): 4.1 + Epochs: 24 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 44.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth diff --git a/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py b/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..c7f1bbc --- /dev/null +++ b/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_r101_fpn_mstrain_2x_coco.py' + +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, True, True, True)), + bbox_head=dict(num_dcn=2)) diff --git a/configs/tood/tood_r101_fpn_mstrain_2x_coco.py 
b/configs/tood/tood_r101_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..d9d2c32 --- /dev/null +++ b/configs/tood/tood_r101_fpn_mstrain_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_r50_fpn_mstrain_2x_coco.py' + +model = dict( + backbone=dict( + depth=101, + init_cfg=dict(type='Pretrained', + checkpoint='torchvision://resnet101'))) diff --git a/configs/tood/tood_r50_fpn_1x_coco.py b/configs/tood/tood_r50_fpn_1x_coco.py new file mode 100644 index 0000000..35a77a4 --- /dev/null +++ b/configs/tood/tood_r50_fpn_1x_coco.py @@ -0,0 +1,74 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +model = dict( + type='TOOD', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5), + bbox_head=dict( + type='TOODHead', + num_classes=80, + in_channels=256, + stacked_convs=6, + feat_channels=256, + anchor_type='anchor_free', + anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + octave_base_scale=8, + scales_per_octave=1, + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + initial_loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + activated=True, # use probability instead of logit as input + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_cls=dict( + type='QualityFocalLoss', + use_sigmoid=True, + activated=True, # use probability instead of logit as input + beta=2.0, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), + train_cfg=dict( + initial_epoch=4, + initial_assigner=dict(type='ATSSAssigner', topk=9), + assigner=dict(type='TaskAlignedAssigner', topk=13), + alpha=1, + beta=6, + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) + +# custom hooks +custom_hooks = [dict(type='SetEpochInfoHook')] diff --git a/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py b/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py new file mode 100644 index 0000000..c7fbf6a --- /dev/null +++ b/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py @@ -0,0 +1,2 @@ +_base_ = './tood_r50_fpn_1x_coco.py' +model = dict(bbox_head=dict(anchor_type='anchor_based')) diff --git a/configs/tood/tood_r50_fpn_mstrain_2x_coco.py b/configs/tood/tood_r50_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..157d13a --- /dev/null +++ b/configs/tood/tood_r50_fpn_mstrain_2x_coco.py @@ -0,0 +1,22 @@ +_base_ = './tood_r50_fpn_1x_coco.py' +# learning policy +lr_config = dict(step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24) +# multi-scale training +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', + img_scale=[(1333, 480), (1333, 800)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', 
**img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +data = dict(train=dict(pipeline=train_pipeline)) diff --git a/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py b/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py new file mode 100644 index 0000000..47c9269 --- /dev/null +++ b/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py @@ -0,0 +1,7 @@ +_base_ = './tood_x101_64x4d_fpn_mstrain_2x_coco.py' +model = dict( + backbone=dict( + dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + ), + bbox_head=dict(num_dcn=2)) diff --git a/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py b/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py new file mode 100644 index 0000000..842f320 --- /dev/null +++ b/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py @@ -0,0 +1,16 @@ +_base_ = './tood_r50_fpn_mstrain_2x_coco.py' + +model = dict( + backbone=dict( + type='ResNeXt', + depth=101, + groups=64, + base_width=4, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/configs/tridentnet/README.md b/configs/tridentnet/README.md index b6263f2..6c43a3b 100644 --- a/configs/tridentnet/README.md +++ b/configs/tridentnet/README.md @@ -1,6 +1,20 @@ # Scale-Aware Trident Networks for Object Detection -## Introduction +## Abstract + + + +Scale variation is one of the key challenges in object detection. In this work, we first present a controlled experiment to investigate the effect of receptive fields for scale variation in object detection. Based on the findings from the exploration experiments, we propose a novel Trident Network (TridentNet) aiming to generate scale-specific feature maps with a uniform representational power. We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. Then, we adopt a scale-aware training scheme to specialize each branch by sampling object instances of proper scales for training. As a bonus, a fast approximation version of TridentNet could achieve significant improvements without any additional parameters and computational cost compared with the vanilla detector. On the COCO dataset, our TridentNet with ResNet-101 backbone achieves state-of-the-art single-model results of 48.4 mAP. + + +
+ +
+ + + + +## Citation diff --git a/configs/vfnet/README.md b/configs/vfnet/README.md index 363f1b9..e57a1be 100644 --- a/configs/vfnet/README.md +++ b/configs/vfnet/README.md @@ -1,17 +1,27 @@ # VarifocalNet: An IoU-aware Dense Object Detector +## Abstract + + + +Accurately ranking the vast number of candidate detections is crucial for dense object detectors to achieve high performance. Prior work uses the classification score or a combination of classification and predicted localization scores to rank candidates. However, neither option results in a reliable ranking, thus degrading detection performance. In this paper, we propose to learn an Iou-aware Classification Score (IACS) as a joint representation of object presence confidence and localization accuracy. We show that dense object detectors can achieve a more accurate ranking of candidate detections based on the IACS. We design a new loss function, named Varifocal Loss, to train a dense object detector to predict the IACS, and propose a new star-shaped bounding box feature representation for IACS prediction and bounding box refinement. Combining these two new components and a bounding box refinement branch, we build an IoU-aware dense object detector based on the FCOS+ATSS architecture, that we call VarifocalNet or VFNet for short. Extensive experiments on MS COCO show that our VFNet consistently surpasses the strong baseline by ∼2.0 AP with different backbones. Our best model VFNet-X-1200 with Res2Net-101-DCN achieves a single-model single-scale AP of 55.1 on COCO test-dev, which is state-of-the-art among various object detectors. + + +
+ +
+ + + + ## Introduction **VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367). -
- -

Learning to Predict the IoU-aware Classification Score.

-
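To connect the description above to configuration, the snippet below sketches how the IoU-aware classification score (IACS) is supervised on the VFNet head. The values shown follow the paper's defaults (alpha=0.75, gamma=2.0) and are indicative only; the configs linked in the table below remain the authoritative reference.

```python
# Illustrative only: the IACS described above is learned by configuring the
# head's classification loss as a Varifocal Loss.
loss_cls = dict(
    type='VarifocalLoss',
    use_sigmoid=True,
    alpha=0.75,         # down-weights negative examples only, unlike focal loss
    gamma=2.0,
    iou_weighted=True,  # weight positives by the target IoU, yielding the IACS
    loss_weight=1.0)
```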
-## Citing VarifocalNet +## Citation ```latex @article{zhang2020varifocalnet, @@ -26,14 +36,14 @@ | Backbone | Style | DCN | MS train | Lr schd |Inf time (fps) | box AP (val) | box AP (test-dev) | Config | Download | |:------------:|:---------:|:-------:|:--------:|:-------:|:-------------:|:------------:|:-----------------:|:------:|:--------:| -| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json)| -| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json)| -| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json)| -| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json)| -| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | 
[model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| +| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json)| +| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json)| +| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| +| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json)| +| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json)| +| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) | 
[log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| +| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| +| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| **Notes:** diff --git a/configs/vfnet/metafile.yml b/configs/vfnet/metafile.yml index 1d46b16..bcbe576 100644 --- a/configs/vfnet/metafile.yml +++ b/configs/vfnet/metafile.yml @@ -29,7 +29,7 @@ Models: Dataset: COCO Metrics: box AP: 41.6 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth - Name: vfnet_r50_fpn_mstrain_2x_coco In Collection: VFNet @@ -41,7 +41,7 @@ Models: Dataset: COCO Metrics: box AP: 44.8 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth - Name: vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet @@ -53,7 +53,7 @@ Models: Dataset: COCO Metrics: box AP: 48.0 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth - Name: vfnet_r101_fpn_1x_coco In Collection: VFNet @@ -65,7 +65,7 @@ Models: Dataset: COCO Metrics: box AP: 43.6 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth - Name: vfnet_r101_fpn_mstrain_2x_coco In Collection: VFNet @@ -77,7 +77,7 @@ Models: Dataset: COCO Metrics: box AP: 46.7 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth - Name: 
vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet @@ -89,7 +89,7 @@ Models: Dataset: COCO Metrics: box AP: 49.2 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth - Name: vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet @@ -101,7 +101,7 @@ Models: Dataset: COCO Metrics: box AP: 50.0 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth - Name: vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco In Collection: VFNet @@ -113,4 +113,4 @@ Models: Dataset: COCO Metrics: box AP: 50.8 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth diff --git a/configs/wider_face/README.md b/configs/wider_face/README.md index b8fe474..2e6124c 100644 --- a/configs/wider_face/README.md +++ b/configs/wider_face/README.md @@ -1,4 +1,20 @@ -# WIDER Face Dataset +# WIDER FACE: A Face Detection Benchmark + +## Abstract + + + +Face detection is one of the most studied topics in the computer vision community. Much of the progresses have been made by the availability of face detection benchmark datasets. We show that there is a gap between current face detection performance and the real world requirements. To facilitate future face detection research, we introduce the WIDER FACE dataset, which is 10 times larger than existing datasets. The dataset contains rich annotations, including occlusions, poses, event categories, and face bounding boxes. Faces in the proposed dataset are extremely challenging due to large variations in scale, pose and occlusion, as shown in Fig. 1. Furthermore, we show that WIDER FACE dataset is an effective training source for face detection. We benchmark several representative detection systems, providing an overview of state-of-the-art performance and propose a solution to deal with large scale variation. Finally, we discuss common failure cases that worth to be further investigated. + + +
+ + + + +## Introduction @@ -33,6 +49,8 @@ mmdetection After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or create your own config based on the presented one. +## Citation + ``` @inproceedings{yang2016wider, Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou}, diff --git a/configs/yolact/README.md b/configs/yolact/README.md index da3559b..c4e3903 100644 --- a/configs/yolact/README.md +++ b/configs/yolact/README.md @@ -1,15 +1,22 @@ # **Y**ou **O**nly **L**ook **A**t **C**oefficien**T**s - +## Abstract -``` - ██╗ ██╗ ██████╗ ██╗ █████╗ ██████╗████████╗ - ╚██╗ ██╔╝██╔═══██╗██║ ██╔══██╗██╔════╝╚══██╔══╝ - ╚████╔╝ ██║ ██║██║ ███████║██║ ██║ - ╚██╔╝ ██║ ██║██║ ██╔══██║██║ ██║ - ██║ ╚██████╔╝███████╗██║ ██║╚██████╗ ██║ - ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ -``` + + +We present a simple, fully-convolutional model for real-time instance segmentation that achieves 29.8 mAP on MS COCO at 33.5 fps evaluated on a single Titan Xp, which is significantly faster than any previous competitive approach. Moreover, we obtain this result after training on only one GPU. We accomplish this by breaking instance segmentation into two parallel subtasks: (1) generating a set of prototype masks and (2) predicting per-instance mask coefficients. Then we produce instance masks by linearly combining the prototypes with the mask coefficients. We find that because this process doesn't depend on repooling, this approach produces very high-quality masks and exhibits temporal stability for free. Furthermore, we analyze the emergent behavior of our prototypes and show they learn to localize instances on their own in a translation variant manner, despite being fully-convolutional. Finally, we also propose Fast NMS, a drop-in 12 ms faster replacement for standard NMS that only has a marginal performance penalty. + + +
+ + + + +## Introduction + + A simple, fully convolutional model for real-time instance segmentation. This is the code for our paper: @@ -25,9 +32,9 @@ Here are our YOLACT models along with their FPS on a Titan Xp and mAP on COCO's | Image Size | GPU x BS | Backbone | *FPS | mAP | Weights | Configs | Download | |:----------:|:--------:|:-------------:|:-----:|:----:|:-------:|:------:|:--------:| -| 550 | 1x8 | Resnet50-FPN | 42.5 | 29.0 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_1x8_coco.py) |[model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco_20200908-f38d58df.pth) | -| 550 | 8x8 | Resnet50-FPN | 42.5 | 28.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_8x8_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco_20200908-ca34f5db.pth) | -| 550 | 1x8 | Resnet101-FPN | 33.5 | 30.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r101_1x8_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco_20200908-4cbe9101.pth) | +| 550 | 1x8 | Resnet50-FPN | 42.5 | 29.0 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_1x8_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth) | +| 550 | 8x8 | Resnet50-FPN | 42.5 | 28.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_8x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth) | +| 550 | 1x8 | Resnet101-FPN | 33.5 | 30.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r101_1x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth) | *Note: The FPS is evaluated by the [original implementation](https://github.com/dbolya/yolact). When calculating FPS, only the model inference time is taken into account. Data loading and post-processing operations such as converting masks to RLE code, generating COCO JSON results, image rendering are not included. 
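The YOLACT checkpoints in the table above now resolve through `download.openmmlab.com`. A minimal sketch for spot-checking that one of the migrated URLs still loads with the MMDetection 2.x Python API (not part of this patch; the demo image path is an assumption, any local image works):

```python
# Not part of this patch: quick check that a migrated YOLACT checkpoint URL
# still loads. Assumes an MMDetection 2.x checkout with mmdet installed.
from mmdet.apis import inference_detector, init_detector

config_file = 'configs/yolact/yolact_r50_1x8_coco.py'
checkpoint = ('https://download.openmmlab.com/mmdetection/v2.0/yolact/'
              'yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth')

# init_detector accepts an http(s) checkpoint URL; mmcv downloads and caches it.
model = init_detector(config_file, checkpoint, device='cuda:0')

# For a mask model such as YOLACT the single-image result is a
# (bbox_results, segm_results) tuple, one entry per COCO class.
bbox_results, segm_results = inference_detector(model, 'demo/demo.jpg')
print(sum(len(b) for b in bbox_results), 'detections')
```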
diff --git a/configs/yolact/metafile.yml b/configs/yolact/metafile.yml index 159b2e0..e7019ae 100644 --- a/configs/yolact/metafile.yml +++ b/configs/yolact/metafile.yml @@ -36,7 +36,7 @@ Models: Dataset: COCO Metrics: mask AP: 29.0 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco_20200908-f38d58df.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth - Name: yolact_r50_8x8_coco In Collection: YOLACT @@ -55,7 +55,7 @@ Models: Dataset: COCO Metrics: mask AP: 28.4 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco_20200908-ca34f5db.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth - Name: yolact_r101_1x8_coco In Collection: YOLACT @@ -75,4 +75,4 @@ Models: Dataset: COCO Metrics: mask AP: 30.4 - Weights: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco_20200908-4cbe9101.pth + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth diff --git a/configs/yolact/yolact_r50_1x8_coco.py b/configs/yolact/yolact_r50_1x8_coco.py index 9d2103c..ceed0ca 100644 --- a/configs/yolact/yolact_r50_1x8_coco.py +++ b/configs/yolact/yolact_r50_1x8_coco.py @@ -90,15 +90,9 @@ img_norm_cfg = dict( mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -109,8 +103,14 @@ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] diff --git a/configs/yolo/README.md b/configs/yolo/README.md index 6c7ba55..93d14bf 100644 --- a/configs/yolo/README.md +++ b/configs/yolo/README.md @@ -1,6 +1,20 @@ -# YOLOv3 +# YOLOv3: An Incremental Improvement -## Introduction +## Abstract + + + +We present some updates to YOLO! We made a bunch of little design changes to make it better. We also trained this new network that's pretty swell. It's a little bigger than last time but more accurate. It's still fast though, don't worry. At 320x320 YOLOv3 runs in 22 ms at 28.2 mAP, as accurate as SSD but three times faster. When we look at the old .5 IOU mAP detection metric YOLOv3 is quite good. It achieves 57.9 mAP@50 in 51 ms on a Titan X, compared to 57.5 mAP@50 in 198 ms by RetinaNet, similar performance but 3.8x faster. + + +
+ + + + +## Citation diff --git a/configs/yolo/yolov3_d53_320_273e_coco.py b/configs/yolo/yolov3_d53_320_273e_coco.py index 87359f6..d4785e3 100644 --- a/configs/yolo/yolov3_d53_320_273e_coco.py +++ b/configs/yolo/yolov3_d53_320_273e_coco.py @@ -2,9 +2,8 @@ # dataset settings img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict(type='PhotoMetricDistortion'), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -16,6 +15,7 @@ min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), diff --git a/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py b/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py index d029b5c..94325c5 100644 --- a/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py +++ b/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py @@ -2,9 +2,8 @@ # dataset settings img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict(type='PhotoMetricDistortion'), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -16,6 +15,7 @@ min_crop_size=0.3), dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), diff --git a/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py b/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py index 83026b0..601f0fa 100644 --- a/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py +++ b/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py @@ -62,7 +62,6 @@ train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), - dict(type='PhotoMetricDistortion'), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -74,6 +73,7 @@ min_crop_size=0.3), dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), diff --git a/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py b/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py index faec1b4..477d253 100644 --- a/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py +++ b/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py @@ -13,9 +13,8 @@ img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict(type='PhotoMetricDistortion'), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -27,6 +26,7 @@ min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), diff --git a/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py 
b/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py index 645c0ee..fc3663e 100644 --- a/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py +++ b/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py @@ -62,9 +62,8 @@ img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), - dict(type='PhotoMetricDistortion'), dict( type='Expand', mean=img_norm_cfg['mean'], @@ -80,6 +79,7 @@ multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), + dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), diff --git a/configs/yolof/README.md b/configs/yolof/README.md index e16e8a2..6a53ed5 100644 --- a/configs/yolof/README.md +++ b/configs/yolof/README.md @@ -1,6 +1,20 @@ # You Only Look One-level Feature -## Introduction +## Abstract + + + +This paper revisits feature pyramids networks (FPN) for one-stage detectors and points out that the success of FPN is due to its divide-and-conquer solution to the optimization problem in object detection rather than multi-scale feature fusion. From the perspective of optimization, we introduce an alternative way to address the problem instead of adopting the complex feature pyramids - {\em utilizing only one-level feature for detection}. Based on the simple and efficient solution, we present You Only Look One-level Feature (YOLOF). In our method, two key components, Dilated Encoder and Uniform Matching, are proposed and bring considerable improvements. Extensive experiments on the COCO benchmark prove the effectiveness of the proposed model. Our YOLOF achieves comparable results with its feature pyramids counterpart RetinaNet while being 2.5× faster. Without transformer layers, YOLOF can match the performance of DETR in a single-level feature manner with 7× less training epochs. With an image size of 608×608, YOLOF achieves 44.3 mAP running at 60 fps on 2080Ti, which is 13% faster than YOLOv4. + + +
+ + + + +## Citation diff --git a/configs/yolox/README.md b/configs/yolox/README.md index dd00796..b836b82 100644 --- a/configs/yolox/README.md +++ b/configs/yolox/README.md @@ -1,6 +1,20 @@ # YOLOX: Exceeding YOLO Series in 2021 -## Introduction +## Abstract + + + +In this report, we present some experienced improvements to YOLO series, forming a new high-performance detector -- YOLOX. We switch the YOLO detector to an anchor-free manner and conduct other advanced detection techniques, i.e., a decoupled head and the leading label assignment strategy SimOTA to achieve state-of-the-art results across a large scale range of models: For YOLO-Nano with only 0.91M parameters and 1.08G FLOPs, we get 25.3% AP on COCO, surpassing NanoDet by 1.8% AP; for YOLOv3, one of the most widely used detectors in industry, we boost it to 47.3% AP on COCO, outperforming the current best practice by 3.0% AP; for YOLOX-L with roughly the same amount of parameters as YOLOv4-CSP, YOLOv5-L, we achieve 50.0% AP on COCO at a speed of 68.9 FPS on Tesla V100, exceeding YOLOv5-L by 1.8% AP. Further, we won the 1st Place on Streaming Perception Challenge (Workshop on Autonomous Driving at CVPR 2021) using a single YOLOX-L model. We hope this report can provide useful experience for developers and researchers in practical scenes, and we also provide deploy versions with ONNX, TensorRT, NCNN, and Openvino supported. + + +
+ + + + +## Citation @@ -17,9 +31,14 @@ | Backbone | size | Mem (GB) | box AP | Config | Download | |:---------:|:-------:|:-------:|:-------:|:--------:|:------:| -| YOLOX-Tiny | 416 | 3.6 | 31.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_tiny_8x8_300e_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20210806_234250.log.json) | +| YOLOX-tiny | 416 | 3.5 | 32.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_tiny_8x8_300e_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234.log.json) | +| YOLOX-s | 640 | 7.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_s_8x8_300e_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711.log.json) | +| YOLOX-l | 640 | 19.9 | 49.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_l_8x8_300e_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236.log.json) | +| YOLOX-x | 640 | 28.1 | 50.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_x_8x8_300e_coco.py) |[model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254.log.json) | + **Note**: -1. The test score threshold is 0.001. -2. We find that the performance is unstable and may fluctuate by about 0.7 mAP. We will continue to investigate and improve it. +1. The test score threshold is 0.001, and the box AP indicates the best AP. +2. Due to the need for pre-training weights, we cannot reproduce the performance of the `yolox-nano` model. Please refer to https://github.com/Megvii-BaseDetection/YOLOX/issues/674 for more information. +3. We also trained the model by the official release of YOLOX based on [Megvii-BaseDetection/YOLOX#735](https://github.com/Megvii-BaseDetection/YOLOX/issues/735) with commit ID [38c633](https://github.com/Megvii-BaseDetection/YOLOX/tree/38c633bf176462ee42b110c70e4ffe17b5753208). We found that the best AP of `YOLOX-tiny`, `YOLOX-s`, `YOLOX-l`, and `YOLOX-x` is 31.8, 40.3, 49.2, and 50.9, respectively. The performance is consistent with that of our re-implementation (see Table above) but still has a gap (0.3~0.8 AP) in comparison with the reported performance in their [README](https://github.com/Megvii-BaseDetection/YOLOX/blob/38c633bf176462ee42b110c70e4ffe17b5753208/README.md#benchmark). 
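Note 1 above quotes a test score threshold of 0.001. In the config system that threshold sits under `model.test_cfg`, so it can be pinned through the usual `_base_` inheritance. A minimal sketch (the file name is hypothetical, and the `test_cfg` keys are assumed from the YOLOX head defaults rather than taken from this patch):

```python
# Hypothetical configs/yolox/yolox_s_8x8_300e_coco_lowthr.py -- not added by
# this patch. Inherits the updated YOLOX-s config and pins the test-time
# score threshold to the 0.001 value quoted in the note above.
_base_ = './yolox_s_8x8_300e_coco.py'

model = dict(test_cfg=dict(score_thr=0.001))
```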
diff --git a/configs/yolox/metafile.yml b/configs/yolox/metafile.yml index 22ad6cc..845cb0a 100644 --- a/configs/yolox/metafile.yml +++ b/configs/yolox/metafile.yml @@ -18,16 +18,53 @@ Collections: URL: https://github.com/open-mmlab/mmdetection/blob/v2.15.1/mmdet/models/detectors/yolox.py#L6 Version: v2.15.1 + Models: + - Name: yolox_s_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_s_8x8_300e_coco.py + Metadata: + Training Memory (GB): 7.6 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 40.5 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth + - Name: yolox_l_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_l_8x8_300e_coco.py + Metadata: + Training Memory (GB): 19.9 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 49.4 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth + - Name: yolox_x_8x8_300e_coco + In Collection: YOLOX + Config: configs/yolox/yolox_x_8x8_300e_coco.py + Metadata: + Training Memory (GB): 28.1 + Epochs: 300 + Results: + - Task: Object Detection + Dataset: COCO + Metrics: + box AP: 50.9 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth - Name: yolox_tiny_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_tiny_8x8_300e_coco.py Metadata: - Training Memory (GB): 3.6 + Training Memory (GB): 3.5 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: - box AP: 31.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth + box AP: 32.0 + Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth diff --git a/configs/yolox/yolox_s_8x8_300e_coco.py b/configs/yolox/yolox_s_8x8_300e_coco.py index 7137495..cc73051 100644 --- a/configs/yolox/yolox_s_8x8_300e_coco.py +++ b/configs/yolox/yolox_s_8x8_300e_coco.py @@ -1,8 +1,13 @@ _base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] +img_scale = (640, 640) + # model settings model = dict( type='YOLOX', + input_size=img_scale, + random_size_range=(15, 25), + random_size_interval=10, backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5), neck=dict( type='YOLOXPAFPN', @@ -20,11 +25,6 @@ data_root = 'data/coco/' dataset_type = 'CocoDataset' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -img_scale = (640, 640) - train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( @@ -36,16 +36,19 @@ img_scale=img_scale, ratio_range=(0.8, 1.6), pad_val=114.0), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), + dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Resize', keep_ratio=True), - dict(type='Pad', pad_to_square=True, pad_val=114.0), - dict(type='Normalize', **img_norm_cfg), + # According to the official implementation, multi-scale + # training is not considered here but in the + # 'mmdet/models/detectors/yolox.py'. 
+ dict(type='Resize', img_scale=img_scale, keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + # If the image is three-channel, the pad value needs + # to be set separately for each channel. + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] @@ -57,13 +60,12 @@ ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=[ - dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ], filter_empty_gt=False, ), - pipeline=train_pipeline, - dynamic_scale=img_scale) + pipeline=train_pipeline) test_pipeline = [ dict(type='LoadImageFromFile'), @@ -74,8 +76,10 @@ transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), - dict(type='Pad', size=img_scale, pad_val=114.0), - dict(type='Normalize', **img_norm_cfg), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) @@ -83,7 +87,8 @@ data = dict( samples_per_gpu=8, - workers_per_gpu=2, + workers_per_gpu=4, + persistent_workers=True, train=train_dataset, val=dict( type=dataset_type, @@ -107,6 +112,11 @@ paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=None) +max_epochs = 300 +num_last_epochs = 15 +resume_from = None +interval = 10 + # learning policy lr_config = dict( _delete_=True, @@ -116,28 +126,35 @@ warmup_by_epoch=True, warmup_ratio=1, warmup_iters=5, # 5 epoch - num_last_epochs=15, + num_last_epochs=num_last_epochs, min_lr_ratio=0.05) -runner = dict(type='EpochBasedRunner', max_epochs=300) -resume_from = None -interval = 10 +runner = dict(type='EpochBasedRunner', max_epochs=max_epochs) custom_hooks = [ - dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48), dict( - type='SyncRandomSizeHook', - ratio_range=(14, 26), - img_scale=img_scale, - interval=interval, + type='YOLOXModeSwitchHook', + num_last_epochs=num_last_epochs, priority=48), dict( type='SyncNormHook', - num_last_epochs=15, + num_last_epochs=num_last_epochs, interval=interval, priority=48), - dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49) + dict( + type='ExpMomentumEMAHook', + resume_from=resume_from, + momentum=0.0001, + priority=49) ] checkpoint_config = dict(interval=interval) -evaluation = dict(interval=interval, metric='bbox') +evaluation = dict( + save_best='auto', + # The evaluation interval is 'interval' when running epoch is + # less than ‘max_epochs - num_last_epochs’. + # The evaluation interval is 1 when running epoch is greater than + # or equal to ‘max_epochs - num_last_epochs’. 
+ interval=interval, + dynamic_intervals=[(max_epochs - num_last_epochs, 1)], + metric='bbox') log_config = dict(interval=50) diff --git a/configs/yolox/yolox_tiny_8x8_300e_coco.py b/configs/yolox/yolox_tiny_8x8_300e_coco.py index 4d517cb..3aee99a 100644 --- a/configs/yolox/yolox_tiny_8x8_300e_coco.py +++ b/configs/yolox/yolox_tiny_8x8_300e_coco.py @@ -2,14 +2,11 @@ # model settings model = dict( + random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - img_scale = (640, 640) train_pipeline = [ @@ -18,16 +15,14 @@ type='RandomAffine', scaling_ratio_range=(0.5, 1.5), border=(-img_scale[0] // 2, -img_scale[1] // 2)), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), + dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Resize', keep_ratio=True), - dict(type='Pad', pad_to_square=True, pad_val=114.0), - dict(type='Normalize', **img_norm_cfg), + dict(type='Resize', img_scale=img_scale, keep_ratio=True), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), + dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] @@ -41,8 +36,10 @@ transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), - dict(type='Pad', size=(416, 416), pad_val=114.0), - dict(type='Normalize', **img_norm_cfg), + dict( + type='Pad', + pad_to_square=True, + pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) @@ -54,26 +51,3 @@ train=train_dataset, val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) - -resume_from = None -interval = 10 - -# Execute in the order of insertion when the priority is the same. 
-# The smaller the value, the higher the priority -custom_hooks = [ - dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48), - dict( - type='SyncRandomSizeHook', - ratio_range=(10, 20), - img_scale=img_scale, - interval=interval, - priority=48), - dict( - type='SyncNormHook', - num_last_epochs=15, - interval=interval, - priority=48), - dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49) -] -checkpoint_config = dict(interval=interval) -evaluation = dict(interval=interval, metric='bbox') From 129566e0dc03d0ba625e736c7a5371fd7c11f8f9 Mon Sep 17 00:00:00 2001 From: dnth Date: Wed, 26 Jan 2022 13:55:11 +0800 Subject: [PATCH 2/3] add custom configs with swin backbone for retinanet and vfnet --- .../retinanet_swin-b-p4-w7_fpn_1x_coco.py | 16 +++ .../retinanet_swin-s-p4-w7_fpn_1x_coco.py | 10 ++ .../retinanet_swin-t-p4-w7_fpn_1x_coco.py | 30 +++++ .../vfnet/vfnet_swin-b-p4-w7_fpn_1x_coco.py | 16 +++ .../vfnet/vfnet_swin-s-p4-w7_fpn_1x_coco.py | 10 ++ .../vfnet/vfnet_swin-t-p4-w7_fpn_1x_coco.py | 118 ++++++++++++++++++ 6 files changed, 200 insertions(+) create mode 100644 custom_configs/retinanet/retinanet_swin-b-p4-w7_fpn_1x_coco.py create mode 100644 custom_configs/retinanet/retinanet_swin-s-p4-w7_fpn_1x_coco.py create mode 100644 custom_configs/retinanet/retinanet_swin-t-p4-w7_fpn_1x_coco.py create mode 100644 custom_configs/vfnet/vfnet_swin-b-p4-w7_fpn_1x_coco.py create mode 100644 custom_configs/vfnet/vfnet_swin-s-p4-w7_fpn_1x_coco.py create mode 100644 custom_configs/vfnet/vfnet_swin-t-p4-w7_fpn_1x_coco.py diff --git a/custom_configs/retinanet/retinanet_swin-b-p4-w7_fpn_1x_coco.py b/custom_configs/retinanet/retinanet_swin-b-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..0835db6 --- /dev/null +++ b/custom_configs/retinanet/retinanet_swin-b-p4-w7_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = ["./retinanet_swin-t-p4-w7_fpn_1x_coco.py"] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth' # noqa + +model = dict( + backbone=dict( + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + out_indices=(0, 1, 2, 3), + init_cfg=dict(type='Pretrained', checkpoint=pretrained), + ), + neck=dict( + in_channels=[128, 256, 512, 1024], + ), +) diff --git a/custom_configs/retinanet/retinanet_swin-s-p4-w7_fpn_1x_coco.py b/custom_configs/retinanet/retinanet_swin-s-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..80fa394 --- /dev/null +++ b/custom_configs/retinanet/retinanet_swin-s-p4-w7_fpn_1x_coco.py @@ -0,0 +1,10 @@ +_base_ = ["./retinanet_swin-t-p4-w7_fpn_1x_coco.py"] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa + +model = dict( + backbone=dict( + depths=[2, 2, 18, 2], + init_cfg=dict(type='Pretrained', checkpoint=pretrained), + ), +) diff --git a/custom_configs/retinanet/retinanet_swin-t-p4-w7_fpn_1x_coco.py b/custom_configs/retinanet/retinanet_swin-t-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..3315093 --- /dev/null +++ b/custom_configs/retinanet/retinanet_swin-t-p4-w7_fpn_1x_coco.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/retinanet_r50_fpn.py', + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +model = dict( + backbone=dict( + _delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 
2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5)) + +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/custom_configs/vfnet/vfnet_swin-b-p4-w7_fpn_1x_coco.py b/custom_configs/vfnet/vfnet_swin-b-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..f8405fe --- /dev/null +++ b/custom_configs/vfnet/vfnet_swin-b-p4-w7_fpn_1x_coco.py @@ -0,0 +1,16 @@ +_base_ = ["./vfnet_swin-t-p4-w7_fpn_1x_coco.py"] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth' # noqa + +model = dict( + backbone=dict( + embed_dims=128, + depths=[2, 2, 18, 2], + num_heads=[4, 8, 16, 32], + out_indices=(0, 1, 2, 3), + init_cfg=dict(type='Pretrained', checkpoint=pretrained), + ), + neck=dict( + in_channels=[128, 256, 512, 1024], + ), +) diff --git a/custom_configs/vfnet/vfnet_swin-s-p4-w7_fpn_1x_coco.py b/custom_configs/vfnet/vfnet_swin-s-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..8b0011f --- /dev/null +++ b/custom_configs/vfnet/vfnet_swin-s-p4-w7_fpn_1x_coco.py @@ -0,0 +1,10 @@ +_base_ = ["./vfnet_swin-t-p4-w7_fpn_1x_coco.py"] + +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa + +model = dict( + backbone=dict( + depths=[2, 2, 18, 2], + init_cfg=dict(type='Pretrained', checkpoint=pretrained), + ), +) diff --git a/custom_configs/vfnet/vfnet_swin-t-p4-w7_fpn_1x_coco.py b/custom_configs/vfnet/vfnet_swin-t-p4-w7_fpn_1x_coco.py new file mode 100644 index 0000000..4d4796d --- /dev/null +++ b/custom_configs/vfnet/vfnet_swin-t-p4-w7_fpn_1x_coco.py @@ -0,0 +1,118 @@ +_base_ = [ + '../_base_/datasets/coco_detection.py', + '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' +] +pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa +# model settings +model = dict( + type='VFNet', + backbone=dict( + #_delete_=True, + type='SwinTransformer', + embed_dims=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + patch_norm=True, + out_indices=(1, 2, 3), + # Please only add indices that would be used + # in FPN, otherwise some parameter will not be used + with_cp=False, + convert_weights=True, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + neck=dict( + type='FPN', + in_channels=[192, 384, 768], + out_channels=256, + start_level=0, + add_extra_convs='on_output', # use P5 + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='VFNetHead', + num_classes=80, + in_channels=256, + stacked_convs=3, + feat_channels=256, + strides=[8, 16, 32, 64, 128], + center_sampling=False, + dcn_on_last_conv=False, + use_atss=True, + use_vfl=True, + loss_cls=dict( + type='VarifocalLoss', + use_sigmoid=True, + alpha=0.75, + gamma=2.0, + iou_weighted=True, + loss_weight=1.0), + loss_bbox=dict(type='GIoULoss', loss_weight=1.5), + loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)), + # training and testing 
settings + train_cfg=dict( + assigner=dict(type='ATSSAssigner', topk=9), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.6), + max_per_img=100)) + +# data setting +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) + +# optimizer +optimizer = dict( + lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.1, + step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) From d2b4910cd5195ac6b4daf354d94c6bd202ad583f Mon Sep 17 00:00:00 2001 From: dnth Date: Wed, 26 Jan 2022 13:57:24 +0800 Subject: [PATCH 3/3] add __base__ folder --- .../_base_/datasets/cityscapes_detection.py | 56 +++++ .../_base_/datasets/cityscapes_instance.py | 56 +++++ .../_base_/datasets/coco_detection.py | 49 +++++ .../_base_/datasets/coco_instance.py | 49 +++++ .../_base_/datasets/coco_instance_semantic.py | 54 +++++ .../_base_/datasets/coco_panoptic.py | 59 ++++++ custom_configs/_base_/datasets/deepfashion.py | 53 +++++ .../_base_/datasets/lvis_v0.5_instance.py | 24 +++ .../_base_/datasets/lvis_v1_instance.py | 24 +++ custom_configs/_base_/datasets/voc0712.py | 55 +++++ custom_configs/_base_/datasets/wider_face.py | 63 ++++++ custom_configs/_base_/default_runtime.py | 16 ++ .../models/cascade_mask_rcnn_r50_fpn.py | 196 ++++++++++++++++++ .../_base_/models/cascade_rcnn_r50_fpn.py | 179 ++++++++++++++++ .../_base_/models/fast_rcnn_r50_fpn.py | 62 ++++++ .../_base_/models/faster_rcnn_r50_caffe_c4.py | 114 ++++++++++ .../models/faster_rcnn_r50_caffe_dc5.py | 105 ++++++++++ .../_base_/models/faster_rcnn_r50_fpn.py | 108 ++++++++++ .../_base_/models/mask_rcnn_r50_caffe_c4.py | 125 +++++++++++ .../_base_/models/mask_rcnn_r50_fpn.py | 120 +++++++++++ .../_base_/models/retinanet_r50_fpn.py | 60 ++++++ .../_base_/models/rpn_r50_caffe_c4.py | 58 ++++++ custom_configs/_base_/models/rpn_r50_fpn.py | 58 ++++++ custom_configs/_base_/models/ssd300.py | 56 +++++ .../_base_/schedules/schedule_1x.py | 11 + .../_base_/schedules/schedule_20e.py | 11 + .../_base_/schedules/schedule_2x.py | 11 + 27 files changed, 1832 insertions(+) create mode 100644 custom_configs/_base_/datasets/cityscapes_detection.py create mode 100644 custom_configs/_base_/datasets/cityscapes_instance.py create mode 100644 custom_configs/_base_/datasets/coco_detection.py 
create mode 100644 custom_configs/_base_/datasets/coco_instance.py create mode 100644 custom_configs/_base_/datasets/coco_instance_semantic.py create mode 100644 custom_configs/_base_/datasets/coco_panoptic.py create mode 100644 custom_configs/_base_/datasets/deepfashion.py create mode 100644 custom_configs/_base_/datasets/lvis_v0.5_instance.py create mode 100644 custom_configs/_base_/datasets/lvis_v1_instance.py create mode 100644 custom_configs/_base_/datasets/voc0712.py create mode 100644 custom_configs/_base_/datasets/wider_face.py create mode 100644 custom_configs/_base_/default_runtime.py create mode 100644 custom_configs/_base_/models/cascade_mask_rcnn_r50_fpn.py create mode 100644 custom_configs/_base_/models/cascade_rcnn_r50_fpn.py create mode 100644 custom_configs/_base_/models/fast_rcnn_r50_fpn.py create mode 100644 custom_configs/_base_/models/faster_rcnn_r50_caffe_c4.py create mode 100644 custom_configs/_base_/models/faster_rcnn_r50_caffe_dc5.py create mode 100644 custom_configs/_base_/models/faster_rcnn_r50_fpn.py create mode 100644 custom_configs/_base_/models/mask_rcnn_r50_caffe_c4.py create mode 100644 custom_configs/_base_/models/mask_rcnn_r50_fpn.py create mode 100644 custom_configs/_base_/models/retinanet_r50_fpn.py create mode 100644 custom_configs/_base_/models/rpn_r50_caffe_c4.py create mode 100644 custom_configs/_base_/models/rpn_r50_fpn.py create mode 100644 custom_configs/_base_/models/ssd300.py create mode 100644 custom_configs/_base_/schedules/schedule_1x.py create mode 100644 custom_configs/_base_/schedules/schedule_20e.py create mode 100644 custom_configs/_base_/schedules/schedule_2x.py diff --git a/custom_configs/_base_/datasets/cityscapes_detection.py b/custom_configs/_base_/datasets/cityscapes_detection.py new file mode 100644 index 0000000..e341b59 --- /dev/null +++ b/custom_configs/_base_/datasets/cityscapes_detection.py @@ -0,0 +1,56 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=8, + dataset=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_train.json', + img_prefix=data_root + 'leftImg8bit/train/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_val.json', + img_prefix=data_root + 'leftImg8bit/val/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_test.json', + img_prefix=data_root + 'leftImg8bit/test/', + pipeline=test_pipeline)) 
+evaluation = dict(interval=1, metric='bbox') diff --git a/custom_configs/_base_/datasets/cityscapes_instance.py b/custom_configs/_base_/datasets/cityscapes_instance.py new file mode 100644 index 0000000..4e3c34e --- /dev/null +++ b/custom_configs/_base_/datasets/cityscapes_instance.py @@ -0,0 +1,56 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=8, + dataset=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_train.json', + img_prefix=data_root + 'leftImg8bit/train/', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_val.json', + img_prefix=data_root + 'leftImg8bit/val/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/instancesonly_filtered_gtFine_test.json', + img_prefix=data_root + 'leftImg8bit/test/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/custom_configs/_base_/datasets/coco_detection.py b/custom_configs/_base_/datasets/coco_detection.py new file mode 100644 index 0000000..149f590 --- /dev/null +++ b/custom_configs/_base_/datasets/coco_detection.py @@ -0,0 +1,49 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 
'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='bbox') diff --git a/custom_configs/_base_/datasets/coco_instance.py b/custom_configs/_base_/datasets/coco_instance.py new file mode 100644 index 0000000..9901a85 --- /dev/null +++ b/custom_configs/_base_/datasets/coco_instance.py @@ -0,0 +1,49 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/custom_configs/_base_/datasets/coco_instance_semantic.py b/custom_configs/_base_/datasets/coco_instance_semantic.py new file mode 100644 index 0000000..6c8bf07 --- /dev/null +++ b/custom_configs/_base_/datasets/coco_instance_semantic.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='SegRescale', scale_factor=1 / 8), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + 
img_prefix=data_root + 'train2017/', + seg_prefix=data_root + 'stuffthingmaps/train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/custom_configs/_base_/datasets/coco_panoptic.py b/custom_configs/_base_/datasets/coco_panoptic.py new file mode 100644 index 0000000..dbade7c --- /dev/null +++ b/custom_configs/_base_/datasets/coco_panoptic.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'CocoPanopticDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='LoadPanopticAnnotations', + with_bbox=True, + with_mask=True, + with_seg=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='SegRescale', scale_factor=1 / 4), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/panoptic_train2017.json', + img_prefix=data_root + 'train2017/', + seg_prefix=data_root + 'annotations/panoptic_train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/panoptic_val2017.json', + img_prefix=data_root + 'val2017/', + seg_prefix=data_root + 'annotations/panoptic_val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/panoptic_val2017.json', + img_prefix=data_root + 'val2017/', + seg_prefix=data_root + 'annotations/panoptic_val2017/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric=['PQ']) diff --git a/custom_configs/_base_/datasets/deepfashion.py b/custom_configs/_base_/datasets/deepfashion.py new file mode 100644 index 0000000..308b4b2 --- /dev/null +++ b/custom_configs/_base_/datasets/deepfashion.py @@ -0,0 +1,53 @@ +# dataset settings +dataset_type = 'DeepFashionDataset' +data_root = 'data/DeepFashion/In-shop/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(750, 1101), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(750, 1101), + flip=False, + transforms=[ + 
dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + imgs_per_gpu=2, + workers_per_gpu=1, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', + img_prefix=data_root + 'Img/', + pipeline=train_pipeline, + data_root=data_root), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', + img_prefix=data_root + 'Img/', + pipeline=test_pipeline, + data_root=data_root), + test=dict( + type=dataset_type, + ann_file=data_root + + 'annotations/DeepFashion_segmentation_gallery.json', + img_prefix=data_root + 'Img/', + pipeline=test_pipeline, + data_root=data_root)) +evaluation = dict(interval=5, metric=['bbox', 'segm']) diff --git a/custom_configs/_base_/datasets/lvis_v0.5_instance.py b/custom_configs/_base_/datasets/lvis_v0.5_instance.py new file mode 100644 index 0000000..207e005 --- /dev/null +++ b/custom_configs/_base_/datasets/lvis_v0.5_instance.py @@ -0,0 +1,24 @@ +# dataset settings +_base_ = 'coco_instance.py' +dataset_type = 'LVISV05Dataset' +data_root = 'data/lvis_v0.5/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + _delete_=True, + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v0.5_train.json', + img_prefix=data_root + 'train2017/')), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v0.5_val.json', + img_prefix=data_root + 'val2017/'), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v0.5_val.json', + img_prefix=data_root + 'val2017/')) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/custom_configs/_base_/datasets/lvis_v1_instance.py b/custom_configs/_base_/datasets/lvis_v1_instance.py new file mode 100644 index 0000000..be791ed --- /dev/null +++ b/custom_configs/_base_/datasets/lvis_v1_instance.py @@ -0,0 +1,24 @@ +# dataset settings +_base_ = 'coco_instance.py' +dataset_type = 'LVISV1Dataset' +data_root = 'data/lvis_v1/' +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + _delete_=True, + type='ClassBalancedDataset', + oversample_thr=1e-3, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_train.json', + img_prefix=data_root)), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_val.json', + img_prefix=data_root), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/lvis_v1_val.json', + img_prefix=data_root)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/custom_configs/_base_/datasets/voc0712.py b/custom_configs/_base_/datasets/voc0712.py new file mode 100644 index 0000000..ae09acd --- /dev/null +++ b/custom_configs/_base_/datasets/voc0712.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'VOCDataset' +data_root = 'data/VOCdevkit/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] 
+test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1000, 600), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=3, + dataset=dict( + type=dataset_type, + ann_file=[ + data_root + 'VOC2007/ImageSets/Main/trainval.txt', + data_root + 'VOC2012/ImageSets/Main/trainval.txt' + ], + img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', + img_prefix=data_root + 'VOC2007/', + pipeline=test_pipeline)) +evaluation = dict(interval=1, metric='mAP') diff --git a/custom_configs/_base_/datasets/wider_face.py b/custom_configs/_base_/datasets/wider_face.py new file mode 100644 index 0000000..d1d649b --- /dev/null +++ b/custom_configs/_base_/datasets/wider_face.py @@ -0,0 +1,63 @@ +# dataset settings +dataset_type = 'WIDERFaceDataset' +data_root = 'data/WIDERFace/' +img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PhotoMetricDistortion', + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 4)), + dict( + type='MinIoURandomCrop', + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3), + dict(type='Resize', img_scale=(300, 300), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(300, 300), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=60, + workers_per_gpu=2, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + ann_file=data_root + 'train.txt', + img_prefix=data_root + 'WIDER_train/', + min_size=17, + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + ann_file=data_root + 'val.txt', + img_prefix=data_root + 'WIDER_val/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'val.txt', + img_prefix=data_root + 'WIDER_val/', + pipeline=test_pipeline)) diff --git a/custom_configs/_base_/default_runtime.py b/custom_configs/_base_/default_runtime.py new file mode 100644 index 0000000..55097c5 --- /dev/null +++ b/custom_configs/_base_/default_runtime.py @@ -0,0 +1,16 @@ +checkpoint_config = dict(interval=1) +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +custom_hooks = [dict(type='NumClassCheckHook')] + +dist_params = dict(backend='nccl') 
+log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/custom_configs/_base_/models/cascade_mask_rcnn_r50_fpn.py b/custom_configs/_base_/models/cascade_mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..2902cca --- /dev/null +++ b/custom_configs/_base_/models/cascade_mask_rcnn_r50_fpn.py @@ -0,0 +1,196 @@ +# model settings +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + 
debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/custom_configs/_base_/models/cascade_rcnn_r50_fpn.py b/custom_configs/_base_/models/cascade_rcnn_r50_fpn.py new file mode 100644 index 0000000..42f74ae --- /dev/null +++ b/custom_configs/_base_/models/cascade_rcnn_r50_fpn.py @@ -0,0 +1,179 @@ +# model settings +model = dict( + type='CascadeRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + 
loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/custom_configs/_base_/models/fast_rcnn_r50_fpn.py b/custom_configs/_base_/models/fast_rcnn_r50_fpn.py new file mode 100644 index 0000000..9982fe0 --- /dev/null +++ b/custom_configs/_base_/models/fast_rcnn_r50_fpn.py @@ -0,0 +1,62 @@ +# model settings +model = dict( + type='FastRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rcnn=dict( + 
assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/custom_configs/_base_/models/faster_rcnn_r50_caffe_c4.py b/custom_configs/_base_/models/faster_rcnn_r50_caffe_c4.py new file mode 100644 index 0000000..51b5db4 --- /dev/null +++ b/custom_configs/_base_/models/faster_rcnn_r50_caffe_c4.py @@ -0,0 +1,114 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=6000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/custom_configs/_base_/models/faster_rcnn_r50_caffe_dc5.py b/custom_configs/_base_/models/faster_rcnn_r50_caffe_dc5.py new file mode 100644 index 0000000..a377a6f --- /dev/null +++ 
b/custom_configs/_base_/models/faster_rcnn_r50_caffe_dc5.py @@ -0,0 +1,105 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + strides=(1, 2, 2, 1), + dilations=(1, 1, 1, 2), + out_indices=(3, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=2048, + feat_channels=2048, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=2048, + featmap_strides=[16]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=2048, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms=dict(type='nms', iou_threshold=0.7), + nms_pre=6000, + max_per_img=1000, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/custom_configs/_base_/models/faster_rcnn_r50_fpn.py b/custom_configs/_base_/models/faster_rcnn_r50_fpn.py new file mode 100644 index 0000000..1ef8e7b --- /dev/null +++ b/custom_configs/_base_/models/faster_rcnn_r50_fpn.py @@ -0,0 +1,108 @@ +# model settings +model = dict( + type='FasterRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + 
target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100) + # soft-nms is also supported for rcnn testing + # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) + )) diff --git a/custom_configs/_base_/models/mask_rcnn_r50_caffe_c4.py b/custom_configs/_base_/models/mask_rcnn_r50_caffe_c4.py new file mode 100644 index 0000000..122202e --- /dev/null +++ b/custom_configs/_base_/models/mask_rcnn_r50_caffe_c4.py @@ -0,0 +1,125 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=False) +model = dict( + type='MaskRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=norm_cfg, + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + rpn_head=dict( + type='RPNHead', + in_channels=1024, + feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + shared_head=dict( + type='ResLayer', + depth=50, + stage=3, + stride=2, + dilation=1, + style='caffe', + norm_cfg=norm_cfg, + norm_eval=True), + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=1024, + featmap_strides=[16]), + bbox_head=dict( + type='BBoxHead', + with_avg_pool=True, + roi_feat_size=7, + in_channels=2048, + num_classes=80, + 
bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=None, + mask_head=dict( + type='FCNMaskHead', + num_convs=0, + in_channels=2048, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=14, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=6000, + nms=dict(type='nms', iou_threshold=0.7), + max_per_img=1000, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/custom_configs/_base_/models/mask_rcnn_r50_fpn.py b/custom_configs/_base_/models/mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..d903e55 --- /dev/null +++ b/custom_configs/_base_/models/mask_rcnn_r50_fpn.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='MaskRCNN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + 
num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/custom_configs/_base_/models/retinanet_r50_fpn.py b/custom_configs/_base_/models/retinanet_r50_fpn.py new file mode 100644 index 0000000..56e43fa --- /dev/null +++ b/custom_configs/_base_/models/retinanet_r50_fpn.py @@ -0,0 +1,60 @@ +# model settings +model = dict( + type='RetinaNet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=80, + in_channels=256, + stacked_convs=4, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) diff --git a/custom_configs/_base_/models/rpn_r50_caffe_c4.py b/custom_configs/_base_/models/rpn_r50_caffe_c4.py new file mode 100644 index 0000000..8b32ca9 --- /dev/null +++ b/custom_configs/_base_/models/rpn_r50_caffe_c4.py @@ -0,0 +1,58 @@ +# model settings +model = dict( + type='RPN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=3, + strides=(1, 2, 2), + dilations=(1, 1, 1), + out_indices=(2, ), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe', + init_cfg=dict( + type='Pretrained', + checkpoint='open-mmlab://detectron2/resnet50_caffe')), + neck=None, + rpn_head=dict( + type='RPNHead', + in_channels=1024, + 
feat_channels=1024, + anchor_generator=dict( + type='AnchorGenerator', + scales=[2, 4, 8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[16]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=12000, + max_per_img=2000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/custom_configs/_base_/models/rpn_r50_fpn.py b/custom_configs/_base_/models/rpn_r50_fpn.py new file mode 100644 index 0000000..edaf4d4 --- /dev/null +++ b/custom_configs/_base_/models/rpn_r50_fpn.py @@ -0,0 +1,58 @@ +# model settings +model = dict( + type='RPN', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_pre=2000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0))) diff --git a/custom_configs/_base_/models/ssd300.py b/custom_configs/_base_/models/ssd300.py new file mode 100644 index 0000000..f17df01 --- /dev/null +++ b/custom_configs/_base_/models/ssd300.py @@ -0,0 +1,56 @@ +# model settings +input_size = 300 +model = dict( + type='SingleStageDetector', + backbone=dict( + type='SSDVGG', + depth=16, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')), + neck=dict( + type='SSDNeck', + in_channels=(512, 1024), + out_channels=(512, 1024, 512, 256, 256, 256), + level_strides=(2, 2, 1, 1), + level_paddings=(1, 1, 0, 0), + l2_norm_scale=20), + bbox_head=dict( + type='SSDHead', + in_channels=(512, 1024, 512, 256, 256, 256), + num_classes=80, + anchor_generator=dict( + type='SSDAnchorGenerator', + scale_major=False, + input_size=input_size, + basesize_ratio_range=(0.15, 0.9), + strides=[8, 16, 32, 64, 100, 300], + ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + 
target_means=[.0, .0, .0, .0], + target_stds=[0.1, 0.1, 0.2, 0.2])), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0., + ignore_iof_thr=-1, + gt_max_assign_all=False), + smoothl1_beta=1., + allowed_border=-1, + pos_weight=-1, + neg_pos_ratio=3, + debug=False), + test_cfg=dict( + nms_pre=1000, + nms=dict(type='nms', iou_threshold=0.45), + min_bbox_size=0, + score_thr=0.02, + max_per_img=200)) +cudnn_benchmark = True diff --git a/custom_configs/_base_/schedules/schedule_1x.py b/custom_configs/_base_/schedules/schedule_1x.py new file mode 100644 index 0000000..13b3783 --- /dev/null +++ b/custom_configs/_base_/schedules/schedule_1x.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/custom_configs/_base_/schedules/schedule_20e.py b/custom_configs/_base_/schedules/schedule_20e.py new file mode 100644 index 0000000..00e8590 --- /dev/null +++ b/custom_configs/_base_/schedules/schedule_20e.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 19]) +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/custom_configs/_base_/schedules/schedule_2x.py b/custom_configs/_base_/schedules/schedule_2x.py new file mode 100644 index 0000000..69dc9ee --- /dev/null +++ b/custom_configs/_base_/schedules/schedule_2x.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[16, 22]) +runner = dict(type='EpochBasedRunner', max_epochs=24)
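
Note (not part of the patch): the files added under custom_configs/_base_/ mirror MMDetection's stock _base_ configs (datasets, runtime, models, schedules). As a rough sketch of how they are meant to be consumed, a downstream config placed directly in custom_configs/ could compose them through _base_ inheritance and override only what differs; the file name, class counts and learning rate below are hypothetical:

# custom_configs/my_mask_rcnn.py -- hypothetical example, not included in this patch
_base_ = [
    '_base_/models/mask_rcnn_r50_fpn.py',   # model definition added above
    '_base_/datasets/coco_instance.py',     # instance-segmentation data pipeline added above
    '_base_/schedules/schedule_1x.py',      # 12-epoch SGD schedule added above
    '_base_/default_runtime.py'             # logging / checkpointing defaults added above
]
# Keys set here merge recursively over the inherited base dicts.
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=3),      # assumed custom dataset with 3 classes
        mask_head=dict(num_classes=3)))
optimizer = dict(lr=0.01)                   # e.g. halve the base 0.02 LR for a smaller GPU setup

With MMDetection installed, such a config would typically be launched with: python tools/train.py custom_configs/my_mask_rcnn.py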