@inproceedings{4b9fe16eb4d04cf79df7e02d3d0316bf,
title = "Discovering Financial Hypernyms by Prompting Masked Language Models",
abstract = "With the rising popularity of Transformer-based language models, several studies have tried to exploit their masked language modeling capabilities to automatically extract relational linguistic knowledge, although this kind of research has rarely investigated semantic relations in specialized domains. The present study aims at testing a general-domain and a domain-adapted Transformer model on two datasets of financial term-hypernym pairs using the prompt methodology. Our results show that the differences of prompts impact critically on models{\textquoteright} performance, and that domain adaptation to financial texts generally improves the capacity of the models to associate the target terms with the right hypernyms, although the more successful models are those which retain a general-domain vocabulary.",
keywords = "Financial Natural Language Processing, Language Modeling, Semantic Relations, Transformers",
author = "Bo Peng and Emmanuele Chersoni and Hsu, {Yu Yin} and Huang, {Chu Ren}",
note = "Publisher Copyright: {\textcopyright} European Language Resources Association (ELRA); 4th Financial Narrative Processing Workshop, FNP 2022 ; Conference date: 24-06-2022",
year = "2022",
month = jun,
day = "24",
language = "English",
series = "Proceedings of the Language Resources and Evaluation Conference, LREC 2022 Workshop on 4th Financial Narrative Processing Workshop, FNP 2022",
publisher = "European Language Resources Association (ELRA)",
pages = "10--16",
editor = "Mahmoud El-Haj and Paul Rayson and Nadhem Zmandar",
booktitle = "Proceedings of the Language Resources and Evaluation Conference, LREC 2022 Workshop on 4th Financial Narrative Processing Workshop, FNP 2022",
}