@thesis{etxaniz2021prometa,
title = {ProMeta: softwarearen garapenerako prozesuen definizio eta ezarpenerako sistema metaereduetan oinarrituta},
author = {Julen Etxaniz},
year = {2021},
date = {2021-10-08},
url = {https://addi.ehu.es/handle/10810/53310},
journal = {ADDI},
abstract = {The objective of the project is to build a system for the definition and implementation of software development processes based on metamodels. In fact, there are several methodologies that are suitable for software development. It is important to define the information of these methodologies through models so that they can be managed flexibly in the future and improvements can be made. In addition, it is necessary to build a system that establishes a methodology using information from the model for use by development teams in projects. The OpenUP methodology was used for the development of the project and the CCII-N2016-02 standard for the drafting of the project documentation and memory.},
keywords = {Software Engineering, Web Development}
}
@thesis{etxaniz2023grounding,
title = {Grounding Language Models for Compositional and Spatial Reasoning},
author = {Julen Etxaniz and Oier Lopez de Lacalle and Aitor Soroa},
year = {2023},
date = {2023-06-30},
url = {https://addi.ehu.es/handle/10810/61827},
journal = {ADDI},
abstract = {Humans can learn to understand and process the distribution of space, and one of the initial tasks of Artificial Intelligence has been to show machines the relationships between space and the objects that appear in it. Humans naturally combine vision and textual information to acquire compositional and spatial relationships among objects, and when reading a text, we are able to mentally depict the spatial relationships that may appear in it. Thus, the visual differences between images depicting "a person sits and a dog stands" and "a person stands and a dog sits" are obvious for humans, but still not clear for automatic systems.
In this project, we propose to evaluate grounded Neural Language models that can perform compositional and spatial reasoning. Neural Language models (LM) have shown impressive capabilities on many NLP tasks but, despite their success, they have been criticized for their lack of meaning. Vision-and-Language models (VLM), trained jointly on text and image data, have been offered as a response to such criticisms, but recent work has shown that these models struggle to ground spatial concepts properly. In the project, we evaluate state-of-the-art pre-trained and fine-tuned VLMs to understand their grounding level on compositional and spatial reasoning. We also propose a variety of methods to create synthetic datasets specially focused on compositional reasoning.
We managed to accomplish all the objectives of this work. First, we improved the state-of-the-art in compositional reasoning. Next, we performed some zero-shot experiments on spatial reasoning. Finally, we explored three alternatives for synthetic dataset creation: text-to-image generation, image captioning and image retrieval. Code is released at https://github.com/juletx/spatial-reasoning and models are released at https://huggingface.co/juletxara.},
keywords = {Artificial Intelligence, Deep Learning, Natural Language Processing, Computer Vision, Grounding, Visual Reasoning, Compositional Reasoning, Spatial Reasoning}
}
@article{etxaniz2023multilingual,
title = {Do Multilingual Language Models Think Better in English?},
author = {Julen Etxaniz and Gorka Azkune and Aitor Soroa and Oier Lopez de Lacalle and Mikel Artetxe},
booktitle = {NAACL 2024},
year = {2023},
date = {2023-08-02},
eprint = {2308.01223},
archiveprefix = {arXiv},
primaryclass = {cs.CL},
journal = {arXiv},
abstract = {Translate-test is a popular technique to improve the performance of multilingual language models. This approach works by translating the input into English using an external machine translation system, and running inference over the translated input. However, these improvements can be attributed to the use of a separate translation system, which is typically trained on large amounts of parallel data not seen by the language model. In this work, we introduce a new approach called self-translate, which overcomes the need of an external translation system by leveraging the few-shot translation capabilities of multilingual language models. Experiments over 5 tasks show that self-translate consistently outperforms direct inference, demonstrating that language models are unable to leverage their full multilingual potential when prompted in non-English languages. Our code is available at https://github.com/juletx/self-translate.},
keywords = {Natural Language Processing, Large Language Models, Deep Learning, Multilinguality}
}
@article{sainz2023nlp,
title = {NLP Evaluation in trouble: On the Need to Measure LLM Data Contamination for each Benchmark},
author = {Oscar Sainz and Jon Ander Campos and Iker García-Ferrero and Julen Etxaniz and Oier Lopez de Lacalle and Eneko Agirre},
booktitle = {EMNLP 2023 Findings},
year = {2023},
date = {2023-10-27},
eprint = {2310.18018},
archiveprefix = {arXiv},
primaryclass = {cs.CL},
abstract = {In this position paper, we argue that the classical evaluation on Natural Language Processing (NLP) tasks using annotated benchmarks is in trouble. The worst kind of data contamination happens when a Large Language Model (LLM) is trained on the test split of a benchmark, and then evaluated in the same benchmark. The extent of the problem is unknown, as it is not straightforward to measure. Contamination causes an overestimation of the performance of a contaminated model in a target benchmark and associated task with respect to their non-contaminated counterparts. The consequences can be very harmful, with wrong scientific conclusions being published while other correct ones are discarded. This position paper defines different levels of data contamination and argues for a community effort, including the development of automatic and semi-automatic measures to detect when data from a benchmark was exposed to a model, and suggestions for flagging papers with conclusions that are compromised by data contamination.},
keywords = {Natural Language Processing, Large Language Models, Evaluation, Data Contamination, Deep Learning}
}
@article{etxaniz2024latxa,
title = {Latxa: An Open Language Model and Evaluation Suite for Basque},
author = {Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
booktitle = {ACL 2024},
year = {2024},
date = {2024-03-29},
eprint = {2403.20266},
archiveprefix = {arXiv},
primaryclass = {cs.CL},
abstract = {We introduce Latxa, a family of large language models for Basque ranging from 7 to 70 billion parameters. Latxa is based on Llama 2, which we continue pretraining on a new Basque corpus comprising 4.3M documents and 4.2B tokens. Addressing the scarcity of high-quality benchmarks for Basque, we further introduce 4 multiple choice evaluation datasets: EusProficiency, comprising 5,169 questions from official language proficiency exams; EusReading, comprising 352 reading comprehension questions; EusTrivia, comprising 1,715 trivia questions from 5 knowledge areas; and EusExams, comprising 16,774 questions from public examinations. In our extensive evaluation, Latxa outperforms all previous open models we compare to by a large margin. In addition, it is competitive with GPT-4 Turbo in language proficiency and understanding, despite lagging behind in reading comprehension and knowledge-intensive tasks. Both the Latxa family of models, as well as our new pretraining corpora and evaluation datasets, are publicly available under open licenses at https://github.com/hitz-zentroa/latxa. Our suite enables reproducible research on methods to build LLMs for low-resource languages.},
keywords = {Natural Language Processing, Large Language Models, Deep Learning, Multilinguality, Basque}
}
@article{heredia2024xnlieu,
title = {XNLIeu: a dataset for cross-lingual NLI in Basque},
author = {Maite Heredia and Julen Etxaniz and Muitze Zulaika and Xabier Saralegi and Jeremy Barnes and Aitor Soroa},
booktitle = {NAACL 2024},
year = {2024},
date = {2024-04-10},
eprint = {2404.06996},
archiveprefix = {arXiv},
primaryclass = {cs.CL},
abstract = {XNLI is a popular Natural Language Inference (NLI) benchmark widely used to evaluate cross-lingual Natural Language Understanding (NLU) capabilities across languages. In this paper, we expand XNLI to include Basque, a low-resource language that can greatly benefit from transfer-learning approaches. The new dataset, dubbed XNLIeu, has been developed by first machine-translating the English XNLI corpus into Basque, followed by a manual post-edition step. We have conducted a series of experiments using mono- and multilingual LLMs to assess a) the effect of professional post-edition on the MT system; b) the best cross-lingual strategy for NLI in Basque; and c) whether the choice of the best cross-lingual strategy is influenced by the fact that the dataset is built by translation. The results show that post-edition is necessary and that the translate-train cross-lingual strategy obtains better results overall, although the gain is lower when tested in a dataset that has been built natively from scratch. Our code and datasets are publicly available under open licenses at https://github.com/hitz-zentroa/xnli-eu.},
keywords = {Natural Language Processing, Large Language Models, Deep Learning, Multilinguality, Basque}
}
@article{agirre2024ikergaitu,
title = {IKER-GAITU: research on language technology for Basque and other low-resource languages},
author = {Agirre, Eneko and Aldabe, Itziar and Arregi, Xabier and Artetxe, Mikel and Atutxa, Unai and Azurmendi, Ekhi and De la Iglesia, Iker and Etxaniz, Julen and García-Romillo, Victor and Hernaez-Rioja, Inma and others},
year = {2024},
date = {2024-04-15},
booktitle = {PROJECTS \& DEMOS SEPLN - CEDI 2024},
abstract = {The general objective of the IKER-GAITU project is to carry out research on language technology to increase the presence of Basque in the digital environment. It will be carried out between 2023 and 2025 thanks to a grant from the Department of Culture and Language Policy of the Basque Government. Current techniques require enormous amounts of textual and oral data per language. On the other hand, the data available for Basque and other low-resource languages might not be enough to attain the same quality as larger languages with the current technology. For this reason, it is essential to carry out research on language technology, so that low-resource languages are present with the same quality as the rest of the languages in these technologies. IKER-GAITU pursues the following research objectives: 1. A system that automatically captures the level of Basque proficiency, written and oral; 2. Bring personalized voice technology to people with disabilities; 3. Spontaneous voice transcription, both when Basque and Spanish are mixed and when there are several speakers; 4. Textual conversational systems in Basque that match the quality of the most powerful large language models. In this project summary we present the results for the first year. More information at https://hitz.eus/iker-gaitu.},
keywords = {Natural Language Processing, Large Language Models, Deep Learning, Multilinguality, Basque}
}
@article{biderman2024lmevaluation,
title={Lessons from the Trenches on Reproducible Evaluation of Language Models},
author={Stella Biderman and Hailey Schoelkopf and Lintang Sutawika and Leo Gao and Jonathan Tow and Baber Abbasi and Alham Fikri Aji and Pawan Sasanka Ammanamanchi and Sidney Black and Jordan Clive and Anthony DiPofi and Julen Etxaniz and Benjamin Fattori and Jessica Zosa Forde and Charles Foster and Jeffrey Hsu and Mimansa Jaiswal and Wilson Y. Lee and Haonan Li and Charles Lovering and Niklas Muennighoff and Ellie Pavlick and Jason Phang and Aviya Skowron and Samson Tan and Xiangru Tang and Kevin A. Wang and Genta Indra Winata and François Yvon and Andy Zou},
year={2024},
date={2024-05-23},
eprint={2405.14782},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2405.14782},
abstract={Effective evaluation of language models remains an open challenge in NLP. Researchers and engineers face methodological issues such as the sensitivity of models to evaluation setup, difficulty of proper comparisons across methods, and the lack of reproducibility and transparency. In this paper we draw on three years of experience in evaluating large language models to provide guidance and lessons for researchers. First, we provide an overview of common challenges faced in language model evaluation. Second, we delineate best practices for addressing or lessening the impact of these challenges on research. Third, we present the Language Model Evaluation Harness (lm-eval): an open source library for independent, reproducible, and extensible evaluation of language models that seeks to address these issues. We describe the features of the library as well as case studies in which the library has been used to alleviate these methodological concerns.},
keywords={Natural Language Processing, Large Language Models, Deep Learning, Evaluation, Reproducibility}
}
@article{etxaniz2024bertaqa,
title={BertaQA: How Much Do Language Models Know About Local Culture?},
author={Julen Etxaniz and Gorka Azkune and Aitor Soroa and Oier Lopez de Lacalle and Mikel Artetxe},
year={2024},
date={2024-06-11},
booktitle = {NeurIPS Datasets and Benchmarks 2024},
eprint={2406.07302},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2406.07302},
abstract={Large Language Models (LLMs) exhibit extensive knowledge about the world, but most evaluations have been limited to global or anglocentric subjects. This raises the question of how well these models perform on topics relevant to other cultures, whose presence on the web is not that prominent. To address this gap, we introduce BertaQA, a multiple-choice trivia dataset that is parallel in English and Basque. The dataset consists of a local subset with questions pertinent to the Basque culture, and a global subset with questions of broader interest. We find that state-of-the-art LLMs struggle with local cultural knowledge, even as they excel on global topics. However, we show that continued pre-training in Basque significantly improves the models' performance on Basque culture, even when queried in English. To our knowledge, this is the first solid evidence of knowledge transfer from a low-resource to a high-resource language. Our analysis sheds light on the complex interplay between language and knowledge, and reveals that some prior findings do not fully hold when reassessed on local topics. Our dataset and evaluation code are available under open licenses at https://github.com/juletx/BertaQA.},
keywords={Natural Language Processing, Large Language Models, Deep Learning, Evaluation, Multilinguality, Culture, Basque}
}
@article{perez2024latxa,
title={Latxa Euskarazko Hizkuntza-Eredua},
author={Perez, Naiara and Etxaniz, Julen and Sainz, Oscar and Aldabe, Itziar and Rigau, German and Agirre, Eneko and Salem, Ahmed and Ormazabal, Aitor and Artetxe, Mikel and Soroa, Aitor},
journal={EKAIA EHUko Zientzia eta Teknologia aldizkaria},
year={2024},
date={2024-09-24},
abstract={In this article we present the Latxa language models (LMs), the largest LMs developed for Basque to date. The Latxa LMs range from 7 to 70 billion parameters and are derived from the English Llama 2 models. To build them, a continued pretraining process was carried out on top of Llama 2, using a Basque corpus of 4.3 million documents and 4.2 billion tokens. To address the scarcity of high-quality evaluation sets for Basque, we compiled four new evaluation datasets: EusProficiency, with 5,169 questions from the preliminary test of the EGA exam; EusReading, with 352 reading comprehension questions; EusTrivia, with 1,715 general-knowledge questions from 5 areas; and EusExams, with 16,774 questions from public examinations. Using these new datasets, we evaluated Latxa and other Basque LMs (both monolingual and multilingual), and the experiments show that Latxa outperforms all previous open models. Latxa also obtains competitive results with respect to the commercial GPT-4 Turbo LM in language proficiency and understanding, despite lagging behind in reading comprehension and knowledge-intensive tasks. Both the Latxa family of models and our new corpora and evaluation datasets are publicly available under open licenses at https://github.com/hitz-zentroa/latxa.},
keywords={Natural Language Processing, Large Language Models, Deep Learning, Multilinguality, Basque}
}
@article{pensa2024gita4calamita,
title={GITA4CALAMITA - Evaluating the Physical Commonsense Understanding of Italian LLMs in a Multi-layered Approach: A CALAMITA Challenge},
author={Pensa, Giulia and Azurmendi, Ekhi and Etxaniz, Julen and Altuna, Bego{\~n}a and Gonzalez-Dios, Itziar},
year={2024},
date={2024-12-06},
booktitle = {CLiC-it 2024},
url = {https://ceur-ws.org/Vol-3878/127_calamita_long.pdf},
abstract = {In the context of the CALAMITA Challenge, we investigate the physical commonsense reasoning capabilities of large language models (LLMs) and introduce a methodology to assess their understanding of the physical world. To this end, we use a test set designed to evaluate physical commonsense reasoning in LLMs for the Italian language. We present a tiered dataset, named the Graded Italian Annotated dataset (GITA), which is written and annotated by a professional linguist. This dataset enables us to focus on three distinct levels of commonsense understanding. Our benchmark aims to evaluate three specific tasks: identifying plausible and implausible stories within our dataset, identifying the conflict that generates an implausible story, and identifying the physical states that make a story implausible. We perform these tasks using LLAMA3, Gemma2 and Mistral. Our findings reveal that, although the models may excel at high-level classification tasks, their reasoning is inconsistent and unverifiable, as they fail to capture intermediate evidence.},
keywords = {Natural Language Processing, Large Language Models, Deep Learning, Evaluation, Commonsense Reasoning, Italian}
}
@article{bengoetxea2024hitzvardial,
title={HiTZ at VarDial 2025 NorSID: Overcoming Data Scarcity with Language Transfer and Automatic Data Annotation},
author={Jaione Bengoetxea and Mikel Zubillaga and Ekhi Azurmendi and Maite Heredia and Julen Etxaniz and Markel Ferro and Jeremy Barnes},
year={2024},
date={2024-12-13},
eprint={2412.10095},
archivePrefix={arXiv},
primaryClass={cs.CL},
booktitle = {COLING 2025},
url={https://arxiv.org/abs/2412.10095},
abstract={In this paper we present our submission for the NorSID Shared Task as part of the 2025 VarDial Workshop (Scherrer et al., 2025), consisting of three tasks: Intent Detection, Slot Filling and Dialect Identification, evaluated using data in different dialects of the Norwegian language. For Intent Detection and Slot Filling, we have fine-tuned a multitask model in a cross-lingual setting, to leverage the xSID dataset available in 17 languages. In the case of Dialect Identification, our final submission consists of a model fine-tuned on the provided development set, which has obtained the highest scores within our experiments. Our final results on the test set show that our models do not drop in performance compared to the development set, likely due to the domain-specificity of the dataset and the similar distribution of both subsets. Finally, we also report an in-depth analysis of the provided datasets and their artifacts, as well as other sets of experiments that have been carried out but did not yield the best results. Additionally, we present an analysis on the reasons why some methods have been more successful than others; mainly the impact of the combination of languages and domain-specificity of the training data on the results.},
keywords={Natural Language Processing, Large Language Models, Deep Learning, Multilinguality, Dialects, Norwegian}
}