Skip to content

Commit

Permalink
support refcocog eval (#10477)
Browse files Browse the repository at this point in the history
  • Loading branch information
xiexinch committed Jun 9, 2023
1 parent 88884b4 commit ad3b574
Show file tree
Hide file tree
Showing 7 changed files with 214 additions and 127 deletions.
47 changes: 14 additions & 33 deletions configs/_base_/datasets/refcoco+.py
Original file line number Diff line number Diff line change
@@ -1,44 +1,24 @@
# dataset settings
dataset_type = 'RefCOCODataset'
data_root = 'data/refcoco/'
data_root = 'data/coco/'

backend_args = None

train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
]

test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_mask=True,
with_bbox=False,
with_seg=False,
with_label=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
'scale_factor', 'gt_masks', 'text'))
]

train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='train',
pipeline=train_pipeline,
backend_args=backend_args))

val_dataloader = dict(
batch_size=1,
num_workers=2,
Expand All @@ -52,8 +32,8 @@
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='val',
pipeline=test_pipeline,
backend_args=backend_args))
text_mode='original',
pipeline=test_pipeline))

test_dataloader = dict(
batch_size=1,
Expand All @@ -68,7 +48,8 @@
ann_file='refcoco+/instances.json',
split_file='refcoco+/refs(unc).p',
split='testA', # or 'testB'
pipeline=test_pipeline,
backend_args=backend_args))
text_mode='original',
pipeline=test_pipeline))

# TODO: set the metrics
val_evaluator = dict(type='RefSegMetric', metrics=['cIoU', 'mIoU'])
test_evaluator = val_evaluator
47 changes: 14 additions & 33 deletions configs/_base_/datasets/refcoco.py
Original file line number Diff line number Diff line change
@@ -1,44 +1,24 @@
# dataset settings
dataset_type = 'RefCOCODataset'
data_root = 'data/refcoco/'
data_root = 'data/coco/'

backend_args = None

train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
]

test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_mask=True,
with_bbox=False,
with_seg=False,
with_label=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
'scale_factor', 'gt_masks', 'text'))
]

train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='train',
pipeline=train_pipeline,
backend_args=backend_args))

val_dataloader = dict(
batch_size=1,
num_workers=2,
Expand All @@ -52,8 +32,8 @@
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='val',
pipeline=test_pipeline,
backend_args=backend_args))
text_mode='original',
pipeline=test_pipeline))

test_dataloader = dict(
batch_size=1,
Expand All @@ -68,7 +48,8 @@
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='testA', # or 'testB'
pipeline=test_pipeline,
backend_args=backend_args))
text_mode='original',
pipeline=test_pipeline))

# TODO: set the metrics
val_evaluator = dict(type='RefSegMetric', metrics=['cIoU', 'mIoU'])
test_evaluator = val_evaluator
51 changes: 16 additions & 35 deletions configs/_base_/datasets/refcocog.py
Original file line number Diff line number Diff line change
@@ -1,44 +1,24 @@
# dataset settings
dataset_type = 'RefCOCODataset'
data_root = 'data/refcoco/'
data_root = 'data/coco/'

backend_args = None

train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
]

test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_mask=True,
with_bbox=False,
with_seg=False,
with_label=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
'scale_factor', 'gt_masks', 'text'))
]

train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcocog/instances.json',
split_file='refcocog/refs(umd).p',
split='train',
pipeline=train_pipeline,
backend_args=backend_args))

val_dataloader = dict(
batch_size=1,
num_workers=2,
Expand All @@ -48,12 +28,12 @@
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
data_prefix=dict(img_path='train2014/'),
ann_file='refcocog/instances.json',
split_file='refcocog/refs(umd).p',
split='val',
pipeline=test_pipeline,
backend_args=backend_args))
text_mode='original',
pipeline=test_pipeline))

test_dataloader = dict(
batch_size=1,
Expand All @@ -64,11 +44,12 @@
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
data_prefix=dict(img_path='train2014/'),
ann_file='refcocog/instances.json',
split_file='refcocog/refs(umd).p',
split='test',
pipeline=test_pipeline,
backend_args=backend_args))
text_mode='original',
pipeline=test_pipeline))

# TODO: set the metrics
val_evaluator = dict(type='RefSegMetric', metrics=['cIoU', 'mIoU'])
test_evaluator = val_evaluator
119 changes: 94 additions & 25 deletions mmdet/datasets/refcoco.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
import collections
import os.path as osp
import random
from typing import List

import mmengine
import numpy as np
from mmengine.dataset import BaseDataset
from pycocotools.coco import COCO

from mmdet.registry import DATASETS

Expand Down Expand Up @@ -38,10 +38,17 @@ def __init__(self,
data_prefix,
split_file,
split='train',
text_mode='random',
**kwargs):
self.split_file = split_file
self.split = split

assert text_mode in ['original', 'random', 'concat', 'select_first']
self.text_mode = text_mode

self._init_refs(
osp.join(data_root, ann_file), osp.join(data_root, split_file))

super().__init__(
data_root=data_root,
data_prefix=data_prefix,
Expand All @@ -55,36 +62,98 @@ def _join_prefix(self):

return super()._join_prefix()

def _init_refs(self, ann_file, split_file):
    """Initialize the referring-expression lookup tables.

    Loads the COCO-style instance annotations (json) and the
    referring-expression split records (pickle), then builds two
    ``ref_id``-keyed maps used by ``load_data_list``:

    - ``self.refs``: ref_id -> referring-expression record.
    - ``self.ref_to_ann``: ref_id -> the COCO annotation the ref points at.

    Also keeps the raw annotation file contents on ``self.instances``.

    Args:
        ann_file (str): Path to the ``instances.json`` annotation file.
        split_file (str): Path to the ``refs(*.p)`` pickle split file.
    """
    self.instances = mmengine.load(ann_file, file_format='json')
    splits = mmengine.load(split_file, file_format='pkl')

    # Index annotations by id for O(1) lookup when resolving each ref.
    # (The original code also built an unused image-id index; dropped.)
    anns = {ann['id']: ann for ann in self.instances['annotations']}

    self.refs = {ref['ref_id']: ref for ref in splits}
    self.ref_to_ann = {ref['ref_id']: anns[ref['ann_id']] for ref in splits}

def load_data_list(self) -> List[dict]:
"""Load data list."""
with mmengine.get_local_path(self.ann_file) as ann_file:
coco = COCO(ann_file)
splits = mmengine.load(self.split_file, file_format='pkl')
img_prefix = self.data_prefix['img_path']

ref_ids = [
ref['ref_id'] for ref in splits if ref['split'] == self.split
]
full_anno = []
for ref_id in ref_ids:
ref = self.refs[ref_id]
ann = self.ref_to_ann[ref_id]
ann.update(ref)
full_anno.append(ann)

image_id_list = []
final_anno = {}
for anno in full_anno:
image_id_list.append(anno['image_id'])
final_anno[anno['ann_id']] = anno
annotations = [value for key, value in final_anno.items()]

coco_train_id = []
image_annot = {}
for i in range(len(self.instances['images'])):
coco_train_id.append(self.instances['images'][i]['id'])
image_annot[self.instances['images'][i]
['id']] = self.instances['images'][i]

images = []
for image_id in list(set(image_id_list)):
images += [image_annot[image_id]]

data_list = []

grounding_dict = collections.defaultdict(list)
for anno in annotations:
image_id = int(anno['image_id'])
grounding_dict[image_id].append(anno)

join_path = mmengine.fileio.get_file_backend(img_prefix).join_path
for refer in splits:
if refer['split'] != self.split:
continue

ann = coco.anns[refer['ann_id']]
img = coco.imgs[ann['image_id']]
sentences = refer['sentences']
bbox = np.array(ann['bbox'], dtype=np.float32)
bbox[2:4] = bbox[0:2] + bbox[2:4] # XYWH -> XYXY
mask = np.array(ann['segmentation'], dtype=np.float32)

for sent in sentences:
data_info = {
'img_path': join_path(img_prefix, img['file_name']),
'image_id': ann['image_id'],
'ann_id': ann['id'],
'text': sent['sent'],
'gt_bboxes': bbox[None, :],
'gt_masks': mask[None, :],
}
data_list.append(data_info)
for image in images:
img_id = image['id']
grounding_anno = grounding_dict[img_id][0]
texts = [x['raw'].lower() for x in grounding_anno['sentences']]
if self.text_mode == 'random':
idx = random.randint(0, len(texts) - 1)
text = texts[idx]
elif self.text_mode == 'concat':
text = [''.join(texts)]
elif self.text_mode == 'select_first':
text = [texts[0]]
elif self.text_mode == 'original':
text = texts
else:
raise ValueError(f'Invalid text mode "{self.text_mode}".')
data_info = {
'img_path':
join_path(img_prefix, image['file_name']),
'img_id':
img_id,
'instances': [{
'mask': grounding_anno['segmentation'],
'ignore_flag': 0
}],
'text':
text
}
data_list.append(data_info)

if len(data_list) == 0:
raise ValueError(f'No sample in split "{self.split}".')
Expand Down
Loading

0 comments on commit ad3b574

Please sign in to comment.