@misc{minted,
title = "The {\tt minted} package: {H}ighlighted source code in \LaTeX",
author = "Geoffrey M. Poore",
url = "http://tug.ctan.org/macros/latex/contrib/minted/minted.pdf",
year = "2017",
howpublished = "CTAN",
}
@misc{fontawesome5,
title = "The {\tt fontawesome5} package",
author = "Marcel Krüger and {Font Awesome}",
url = "http://mirrors.ibiblio.org/CTAN/fonts/fontawesome5/doc/fontawesome5.pdf",
year = "2021",
howpublished = "CTAN",
}
@inproceedings{iki-aizawa-2020-language,
title = "Language-{C}onditioned {F}eature {P}yramids for {V}isual {S}election {T}asks",
author = "Iki, Taichi and
Aizawa, Akiko",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.findings-emnlp.420",
doi = "10.18653/v1/2020.findings-emnlp.420",
pages = "4687--4697",
abstract = "Referring expression comprehension, which is the ability to locate language to an object in an image, plays an important role in creating common ground. Many models that fuse visual and linguistic features have been proposed. However, few models consider the fusion of linguistic features with multiple visual features with different sizes of receptive fields, though the proper size of the receptive field of visual features intuitively varies depending on expressions. In this paper, we introduce a neural network architecture that modulates visual features with varying sizes of receptive field by linguistic features. We evaluate our architecture on tasks related to referring expression comprehension in two visual dialogue games. The results show the advantages and broad applicability of our architecture. Source code is available at https://github.com/Alab-NII/lcfp.",
}