@inproceedings{NakovDaSanMartinoElsayedetal., author = {Nakov, Preslav and Da San Martino, Giovanni and Elsayed, Tamer and Barr{\'o}n-Cede{\~n}o, Alberto and M{\'i}guez, Rub{\'e}n and Shaar, Shaden and Alam, Firoj and Haouari, Fatima and Hasanain, Maram and Babulkov, Nikolay and Nikolov, Alex and Shahi, Gautam Kishore and Struß, Julia Maria and Mandl, Thomas}, title = {The CLEF-2021 CheckThat! Lab on Detecting Check-Worthy Claims, Previously Fact-Checked Claims, and Fake News}, series = {Advances in Information Retrieval}, booktitle = {Advances in Information Retrieval}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-72240-1}, doi = {10.1007/978-3-030-72240-1_75}, pages = {639 -- 649}, abstract = {We describe the fourth edition of the CheckThat! Lab, part of the 2021 Cross-Language Evaluation Forum (CLEF). The lab evaluates technology supporting various tasks related to factuality, and it is offered in Arabic, Bulgarian, English, and Spanish. Task 1 asks to predict which tweets in a Twitter stream are worth fact-checking (focusing on COVID-19). Task 2 asks to determine whether a claim in a tweet can be verified using a set of previously fact-checked claims. Task 3 asks to predict the veracity of a target news article and its topical domain. The evaluation is carried out using mean average precision or precision at rank k for the ranking tasks, and F1 for the classification tasks.}, subject = {Falschmeldung}, language = {en} } @inproceedings{NakovDaSanMartinoElsayedetal., author = {Nakov, Preslav and Da San Martino, Giovanni and Elsayed, Tamer and Barr{\'o}n-Cede{\~n}o, Alberto and M{\'i}guez, Rub{\'e}n and Shaar, Shaden and Alam, Firoj and Haouari, Fatima and Hasanain, Maram and Mansour, Watheq and Hamdan, Bayan and Sheikh Ali, Zien and Babulkov, Nikolay and Nikolov, Alex and Shahi, Gautam Kishore and Struß, Julia Maria and Mandl, Thomas and Kutlu, Mucahid and Kartal, Yavuz Selim}, title = {Overview of the CLEF-2021 CheckThat! Lab on Detecting Check-Worthy Claims, Previously Fact-Checked Claims, and Fake News}, series = {Experimental IR Meets Multilinguality, Multimodality, and Interaction}, booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-85251-1}, doi = {10.1007/978-3-030-85251-1_19}, pages = {264 -- 291}, abstract = {We describe the fourth edition of the CheckThat! Lab, part of the 2021 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting tasks related to factuality, and covers Arabic, Bulgarian, English, Spanish, and Turkish. Task 1 asks to predict which posts in a Twitter stream are worth fact-checking, focusing on COVID-19 and politics (in all five languages). Task 2 asks to determine whether a claim in a tweet can be verified using a set of previously fact-checked claims (in Arabic and English). Task 3 asks to predict the veracity of a news article and its topical domain (in English). The evaluation is based on mean average precision or precision at rank k for the ranking tasks, and macro-F1 for the classification tasks. This was the most popular CLEF-2021 lab in terms of team registrations: 132 teams.
Nearly one-third of them participated: 15, 5, and 25 teams submitted official runs for tasks 1, 2, and 3, respectively.}, subject = {Desinformation}, language = {en} } @inproceedings{StrussRuggeriBarronCedenoetal., author = {Struß, Julia Maria and Ruggeri, Federico and Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Dimitrov, Dimitar and Galassi, Andrea and Pachov, Georgi and Koychev, Ivan and Nakov, Preslav and Siegel, Melanie and Wiegand, Michael and Hasanain, Maram and Suwaileh, Reem and Zaghouani, Wajdi}, title = {Overview of the CLEF-2024 CheckThat! Lab Task 2 on Subjectivity in News Articles}, series = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)}, booktitle = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)}, editor = {Faggioli, Guglielmo and Ferro, Nicola and Galušč{\'a}kov{\'a}, Petra and Seco de Herrera, Alba Garc{\'i}a}, address = {France}, organization = {University of Grenoble Alpes}, issn = {1613-0073}, url = {http://nbn-resolving.de/urn:nbn:de:0074-3740-3}, pages = {287 -- 298}, abstract = {We present an overview of Task 2 of the seventh edition of the CheckThat! lab at the 2024 iteration of the Conference and Labs of the Evaluation Forum (CLEF). The task focuses on subjectivity detection in news articles and was offered in five languages: Arabic, Bulgarian, English, German, and Italian, as well as in a multilingual setting. The datasets for each language were carefully curated and annotated, comprising over 10,000 sentences from news articles. The task challenged participants to develop systems capable of distinguishing between subjective statements (reflecting personal opinions or biases) and objective ones (presenting factual information) at the sentence level. A total of 15 teams participated in the task, submitting 36 valid runs across all language tracks. The participants used a variety of approaches, with transformer-based models being the most popular choice. Strategies included fine-tuning monolingual and multilingual models, and leveraging English models with automatic translation for the non-English datasets. Some teams also explored ensembles, feature engineering, and innovative techniques such as few-shot learning and in-context learning with large language models. The evaluation was based on macro-averaged F1 score. The results varied across languages, with the best performance achieved for Italian and German, followed by English. The Arabic track proved particularly challenging, with no team surpassing an F1 score of 0.50. This task contributes to the broader goal of enhancing the reliability of automated content analysis in the context of misinformation detection and fact-checking. The paper provides detailed insights into the datasets, participant approaches, and results, offering a benchmark for the current state of subjectivity detection across multiple languages.}, subject = {Fehlinformation}, language = {en} } @inproceedings{BarronCedenoAlamChakrabortyetal., author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Chakraborty, Tanmoy and Elsayed, Tamer and Nakov, Preslav and Przybyła, Piotr and Struß, Julia Maria and Haouari, Fatima and Hasanain, Maram and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem}, title = {The CLEF-2024 CheckThat!
Lab}, series = {Advances in Information Retrieval}, booktitle = {Advances in Information Retrieval}, editor = {Goharian, Nazli and Tonellotto, Nicola and He, Yulan and Lipani, Aldo and McDonald, Graham and Macdonald, Craig and Ounis, Iadh}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {978-3-031-56069-9}, issn = {0302-9743}, doi = {10.1007/978-3-031-56069-9_62}, pages = {449 -- 458}, abstract = {The first five editions of the CheckThat! lab focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. Since the 2023 edition, it has been focusing on new problems that can support the research and decision making during the verification process. In this new edition, we focus on new problems and - for the first time - we propose six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1 estimation of check-worthiness (the only task that has been present in all CheckThat! editions), Task 2 identification of subjectivity (a follow up of CheckThat! 2023 edition), Task 3 identification of persuasion (a follow up of SemEval 2023), Task 4 detection of hero, villain, and victim from memes (a follow up of CONSTRAINT 2022), Task 5 Rumor Verification using Evidence from Authorities (a first), and Task 6 robustness of credibility assessment with adversarial examples (a first). These tasks represent challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings.}, subject = {Desinformation}, language = {en} } @inproceedings{BarronCedenoAlamCasellietal., author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Caselli, Tommaso and Da San Martino, Giovanni and Elsayed, Tamer and Galassi, Andrea and Haouari, Fatima and Ruggeri, Federico and Struß, Julia Maria and Nath Nandi, Rabindra and Cheema, Gullal S. and Azizov, Dilshod and Nakov, Preslav}, title = {The CLEF-2023 CheckThat! Lab: Checkworthiness, Subjectivity, Political Bias, Factuality, and Authority}, series = {Advances in Information Retrieval : 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2-6, 2023, Proceedings, Part III}, booktitle = {Advances in Information Retrieval : 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2-6, 2023, Proceedings, Part III}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-28241-6}, issn = {0302-9743}, doi = {10.1007/978-3-031-28241-6_59}, pages = {509 -- 517}, abstract = {The five editions of the CheckThat! lab so far have focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification.
The 2023 edition of the lab zooms into some of the problems and - for the first time - it offers five tasks in seven languages (Arabic, Dutch, English, German, Italian, Spanish, and Turkish): Task 1 asks to determine whether an item, text or a text plus an image, is check-worthy; Task 2 requires to assess whether a text snippet is subjective or not; Task 3 looks for estimating the political bias of a document or a news outlet; Task 4 requires to determine the level of factuality of a document or a news outlet; and Task 5 is about identifying authorities that should be trusted to verify a contended claim.}, subject = {Desinformation}, language = {en} } @inproceedings{BarronCedenoAlamStrussetal., author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Struß, Julia Maria and Nakov, Preslav and Chakraborty, Tanmoy and Elsayed, Tamer and Przybyła, Piotr and Caselli, Tommaso and Da San Martino, Giovanni and Haouari, Fatima and Hasanain, Maram and Li, Chengkai and Piskorski, Jakub and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem}, title = {Overview of the CLEF-2024 CheckThat! Lab : Check-Worthiness, Subjectivity, Persuasion, Roles, Authorities, and Adversarial Robustness}, series = {Experimental IR Meets Multilinguality, Multimodality, and Interaction : 15th International Conference of the CLEF Association, CLEF 2024, Grenoble, France, September 9-12, 2024, Proceedings, Part II}, booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction : 15th International Conference of the CLEF Association, CLEF 2024, Grenoble, France, September 9-12, 2024, Proceedings, Part II}, editor = {Goeuriot, Lorraine and Mulhem, Philippe and Qu{\'e}not, Georges and Schwab, Didier and Di Nunzio, Giorgio Maria and Soulier, Laure and Galušč{\'a}kov{\'a}, Petra and Seco de Herrera, Alba Garc{\'i}a and Faggioli, Guglielmo and Ferro, Nicola}, publisher = {Springer Nature}, address = {Berlin}, isbn = {978-3-031-71907-3}, doi = {10.1007/978-3-031-71908-0}, pages = {28 -- 52}, abstract = {We describe the seventh edition of the CheckThat! lab, part of the 2024 Conference and Labs of the Evaluation Forum (CLEF). Previous editions of CheckThat! focused on the main tasks of the information verification pipeline: check-worthiness, identifying previously fact-checked claims, supporting evidence retrieval, and claim verification. In this edition, we introduced some new challenges, offering six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1 on estimation of check-worthiness (the only task that has been present in all CheckThat! editions), Task 2 on identification of subjectivity (a follow up of the CheckThat! 2023 edition), Task 3 on identification of the use of persuasion techniques (a follow up of SemEval 2023), Task 4 on detection of hero, villain, and victim from memes (a follow up of CONSTRAINT 2022), Task 5 on rumor verification using evidence from authorities (new task), and Task 6 on robustness of credibility assessment with adversarial examples (new task). These are challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings. This year, CheckThat! was one of the most popular labs at CLEF-2024 in terms of team registrations: 130 teams.
More than one-third of them (a total of 46) actually participated.}, subject = {Desinformation}, language = {en} } @inproceedings{BarronCedenoAlamGalassietal., author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Galassi, Andrea and Da San Martino, Giovanni and Nakov, Preslav and Elsayed, Tamer and Azizov, Dilshod and Caselli, Tommaso and Cheema, Gullal S. and Haouari, Fatima and Hasanain, Maram and Kutlu, Mucahid and Li, Chengkai and Ruggeri, Federico and Struß, Julia Maria and Zaghouani, Wajdi}, title = {Overview of the CLEF-2023 CheckThat! Lab on Checkworthiness, Subjectivity, Political Bias, Factuality, and Authority of News Articles and Their Source}, series = {Experimental IR Meets Multilinguality, Multimodality, and Interaction}, booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction}, editor = {Arampatzis, Avi and Kanoulas, Evangelos and Tsikrika, Theodora and Vrochidis, Stefanos and Giachanou, Anastasia and Li, Dan and Aliannejadi, Mohammad and Vlachos, Michalis and Faggioli, Guglielmo and Ferro, Nicola}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {978-3-031-42448-9}, issn = {1611-3349}, doi = {10.1007/978-3-031-42448-9_20}, pages = {251 -- 275}, abstract = {We describe the sixth edition of the CheckThat! lab, part of the 2023 Conference and Labs of the Evaluation Forum (CLEF). The five previous editions of CheckThat! focused on the main tasks of the information verification pipeline: check-worthiness, verifying whether a claim was fact-checked before, supporting evidence retrieval, and claim verification. In this sixth edition, we zoom into some new problems and for the first time we offer five tasks in seven languages: Arabic, Dutch, English, German, Italian, Spanish, and Turkish. Task 1 asks to determine whether an item - text or text plus image - is check-worthy. Task 2 aims to predict whether a sentence from a news article is subjective or not. Task 3 asks to assess the political bias of the news at the article and at the media outlet level. Task 4 focuses on the factuality of reporting of news media. Finally, Task 5 looks at identifying authorities in Twitter that could help verify a given target claim. For a second year, CheckThat! was the most popular lab at CLEF-2023 in terms of team registrations: 127 teams. About one-third of them (a total of 37) actually participated.}, subject = {Desinformation}, language = {en} } @inproceedings{GalassiRuggeriBarronCedenoetal., author = {Galassi, Andrea and Ruggeri, Federico and Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Caselli, Tommaso and Kutlu, Mucahid and Struß, Julia Maria and Antici, Francesco and Hasanain, Maram and K{\"o}hler, Juliane and Korre, Katerina and Leistra, Folkert and Muti, Arianna and Siegel, Melanie and T{\"u}rkmen, Mehmet Deniz and Wiegand, Michael and Zaghouani, Wajdi}, title = {Overview of the CLEF-2023 CheckThat! Lab: Task 2 on Subjectivity in News Articles}, series = {CLEF 2023 Working Notes}, booktitle = {CLEF 2023 Working Notes}, editor = {Aliannejadi, Mohammad and Faggioli, Guglielmo and Ferro, Nicola and Vlachos, Michalis}, address = {Thessaloniki}, organization = {Centre for Research and Technology Hellas}, pages = {236 -- 249}, abstract = {We describe the outcome of the 2023 edition of the CheckThat! Lab at CLEF. We focus on subjectivity (Task 2), which has been proposed for the first time. It aims at fostering the technology for the identification of subjective text fragments in news articles.
For that, we produced corpora consisting of 9,530 manually-annotated sentences, covering six languages - Arabic, Dutch, English, German, Italian, and Turkish. Task 2 attracted 12 teams, which submitted a total of 40 final runs covering all languages. The most successful approaches addressed the task using state-of-the-art multilingual transformer models, which were fine-tuned on language-specific data. Teams also experimented with a rich set of other neural architectures, including foundation models, zero-shot classifiers, and standard transformers, mainly coupled with data augmentation and multilingual training strategies to address class imbalance. We publicly release all the datasets and evaluation scripts, with the purpose of promoting further research on this topic.}, subject = {Desinformation}, language = {en} } @inproceedings{NakovBarronCedenoDaSanMartinoetal., author = {Nakov, Preslav and Barr{\'o}n-Cede{\~n}o, Alberto and Da San Martino, Giovanni and Alam, Firoj and Struß, Julia Maria and Mandl, Thomas and M{\'i}guez, Rub{\'e}n and Caselli, Tommaso and Kutlu, Mucahid and Zaghouani, Wajdi and Li, Chengkai and Shaar, Shaden and Shahi, Gautam Kishore and Mubarak, Hamdy and Nikolov, Alex and Babulkov, Nikolay and Kartal, Yavuz Selim and Wiegand, Michael and Siegel, Melanie and K{\"o}hler, Juliane}, title = {Overview of the CLEF-2022 CheckThat! Lab on Fighting the COVID-19 Infodemic and Fake News Detection}, series = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022.}, booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022.}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-13643-6}, issn = {0302-9743}, doi = {10.1007/978-3-031-13643-6_29}, pages = {495 -- 520}, abstract = {We describe the fifth edition of the CheckThat! lab, part of the 2022 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting tasks related to factuality in multiple languages: Arabic, Bulgarian, Dutch, English, German, Spanish, and Turkish. Task 1 asks to identify relevant claims in tweets in terms of check-worthiness, verifiability, harmfulness, and attention-worthiness. Task 2 asks to detect previously fact-checked claims that could be relevant to fact-check a new claim. It targets both tweets and political debates/speeches. Task 3 asks to predict the veracity of the main claim in a news article. CheckThat! was the most popular lab at CLEF-2022 in terms of team registrations: 137 teams. More than one-third (37\%) of them actually participated: 18, 7, and 26 teams submitted 210, 37, and 126 official runs for tasks 1, 2, and 3, respectively.}, subject = {Desinformation}, language = {en} } @inproceedings{NakovBarronCedenoDaSanMartinoetal., author = {Nakov, Preslav and Barr{\'o}n-Cede{\~n}o, Alberto and Da San Martino, Giovanni and Alam, Firoj and Struß, Julia Maria and Mandl, Thomas and M{\'i}guez, Rub{\'e}n and Caselli, Tommaso and Kutlu, Mucahid and Zaghouani, Wajdi and Li, Chengkai and Shaar, Shaden and Shahi, Gautam Kishore and Mubarak, Hamdy and Nikolov, Alex and Babulkov, Nikolay and Kartal, Yavuz Selim and Beltr{\'a}n, Javier}, title = {The CLEF-2022 CheckThat! Lab on Fighting the COVID-19 Infodemic and Fake News Detection}, series = {Advances in Information Retrieval. ECIR 2022}, booktitle = {Advances in Information Retrieval.
ECIR 2022}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-99739-7}, issn = {0302-9743}, doi = {10.1007/978-3-030-99739-7_52}, pages = {416 -- 428}, abstract = {The fifth edition of the CheckThat! Lab is held as part of the 2022 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting various factuality tasks in seven languages: Arabic, Bulgarian, Dutch, English, German, Spanish, and Turkish. Task 1 focuses on disinformation related to the ongoing COVID-19 infodemic and politics, and asks to predict whether a tweet is worth fact-checking, contains a verifiable factual claim, is harmful to the society, or is of interest to policy makers and why. Task 2 asks to retrieve claims that have been previously fact-checked and that could be useful to verify the claim in a tweet. Task 3 is to predict the veracity of a news article. Tasks 1 and 3 are classification problems, while Task 2 is a ranking one.}, subject = {Desinformation}, language = {en} } @inproceedings{AlamStrussChakrabortyetal., author = {Alam, Firoj and Struß, Julia Maria and Chakraborty, Tanmoy and Dietze, Stefan and Hafid, Salim and Korre, Katerina and Muti, Arianna and Nakov, Preslav and Ruggeri, Federico and Schellhammer, Sebastian and Setty, Vinay and Sundriyal, Megha and Todorov, Konstantin and Venktesh, V.}, title = {The CLEF-2025 CheckThat! Lab}, series = {Advances in Information Retrieval : 47th European Conference on Information Retrieval, ECIR 2025, Lucca, Italy, April 6-10, 2025, Proceedings, Part V}, booktitle = {Advances in Information Retrieval : 47th European Conference on Information Retrieval, ECIR 2025, Lucca, Italy, April 6-10, 2025, Proceedings, Part V}, editor = {Hauff, Claudia and Macdonald, Craig and Jannach, Dietmar and Nardini, Franco Maria and Pinelli, Fabio and Silvestri, Fabrizio and Tonellotto, Nicola}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {978-3-031-88720-8}, issn = {0302-9743}, doi = {10.1007/978-3-031-88720-8_68}, pages = {467 -- 478}, abstract = {The CheckThat! lab aims to advance the development of innovative technologies designed to identify and to counteract online disinformation and manipulation efforts across various languages and platforms. The first five editions of the CheckThat! lab focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. Since the 2023 edition, the lab has broadened the focus and addressed new problems on auxiliary tasks supporting research and decision-making during the verification process. In the 2025 edition of the lab, we consider tasks at the core of the verification pipeline again as well as auxiliary tasks: Task 1 is on identification of subjectivity (a follow up of the CheckThat! 2024 edition), Task 2 is on claim normalization, Task 3 addresses fact-checking numerical claims, and Task 4 focuses on scientific web discourse processing. These tasks represent challenging classification and retrieval problems at the document and at the span level, including multilingual settings.}, subject = {Desinformation}, language = {en} }