@inproceedings{NakovDaSanMartinoElsayedetal2021Overview,
  author    = {Nakov, Preslav and Da San Martino, Giovanni and Elsayed, Tamer and Barr{\'o}n-Cede{\~n}o, Alberto and M{\'i}guez, Rub{\'e}n and Shaar, Shaden and Alam, Firoj and Haouari, Fatima and Hasanain, Maram and Mansour, Watheq and Hamdan, Bayan and Sheikh Ali, Zien and Babulkov, Nikolay and Nikolov, Alex and Shahi, Gautam Kishore and Struß, Julia Maria and Mandl, Thomas and Kutlu, Mucahid and Kartal, Yavuz Selim},
  title     = {Overview of the CLEF-2021 CheckThat! Lab on Detecting Check-Worthy Claims, Previously Fact-Checked Claims, and Fake News},
  series    = {Experimental IR Meets Multilinguality, Multimodality, and Interaction},
  booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction},
  publisher = {Springer},
  address   = {Cham},
  year      = {2021},
  isbn      = {978-3-030-85251-1},
  doi       = {10.1007/978-3-030-85251-1_19},
  pages     = {264--291},
  abstract  = {We describe the fourth edition of the CheckThat! Lab, part of the 2021 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting tasks related to factuality, and covers Arabic, Bulgarian, English, Spanish, and Turkish. Task 1 asks to predict which posts in a Twitter stream are worth fact-checking, focusing on COVID-19 and politics (in all five languages). Task 2 asks to determine whether a claim in a tweet can be verified using a set of previously fact-checked claims (in Arabic and English). Task 3 asks to predict the veracity of a news article and its topical domain (in English). The evaluation is based on mean average precision or precision at rank k for the ranking tasks, and macro-F1 for the classification tasks. This was the most popular CLEF-2021 lab in terms of team registrations: 132 teams. Nearly one-third of them participated: 15, 5, and 25 teams submitted official runs for tasks 1, 2, and 3, respectively.},
  subject   = {Desinformation},
  language  = {en}
}

@inproceedings{NakovDaSanMartinoElsayedetal2021ECIR,
  author    = {Nakov, Preslav and Da San Martino, Giovanni and Elsayed, Tamer and Barr{\'o}n-Cede{\~n}o, Alberto and M{\'i}guez, Rub{\'e}n and Shaar, Shaden and Alam, Firoj and Haouari, Fatima and Hasanain, Maram and Babulkov, Nikolay and Nikolov, Alex and Shahi, Gautam Kishore and Struß, Julia Maria and Mandl, Thomas},
  title     = {The CLEF-2021 CheckThat! Lab on Detecting Check-Worthy Claims, Previously Fact-Checked Claims, and Fake News},
  series    = {Advances in Information Retrieval},
  booktitle = {Advances in Information Retrieval},
  publisher = {Springer},
  address   = {Cham},
  year      = {2021},
  isbn      = {978-3-030-72240-1},
  doi       = {10.1007/978-3-030-72240-1_75},
  pages     = {639--649},
  abstract  = {We describe the fourth edition of the CheckThat! Lab, part of the 2021 Cross-Language Evaluation Forum (CLEF). The lab evaluates technology supporting various tasks related to factuality, and it is offered in Arabic, Bulgarian, English, and Spanish. Task 1 asks to predict which tweets in a Twitter stream are worth fact-checking (focusing on COVID-19). Task 2 asks to determine whether a claim in a tweet can be verified using a set of previously fact-checked claims. Task 3 asks to predict the veracity of a target news article and its topical domain. The evaluation is carried out using mean average precision or precision at rank k for the ranking tasks, and F1 for the classification tasks.},
  subject   = {Falschmeldung},
  language  = {en}
}

@inproceedings{NakovBarronCedenoDaSanMartinoetal2022ECIR,
  author    = {Nakov, Preslav and Barr{\'o}n-Cede{\~n}o, Alberto and Da San Martino, Giovanni and Alam, Firoj and Struß, Julia Maria and Mandl, Thomas and M{\'i}guez, Rub{\'e}n and Caselli, Tommaso and Kutlu, Mucahid and Zaghouani, Wajdi and Li, Chengkai and Shaar, Shaden and Shahi, Gautam Kishore and Mubarak, Hamdy and Nikolov, Alex and Babulkov, Nikolay and Kartal, Yavuz Selim and Beltr{\'a}n, Javier},
  title     = {The CLEF-2022 CheckThat! Lab on Fighting the COVID-19 Infodemic and Fake News Detection},
  series    = {Advances in Information Retrieval. ECIR 2022},
  booktitle = {Advances in Information Retrieval. ECIR 2022},
  publisher = {Springer},
  address   = {Cham},
  year      = {2022},
  isbn      = {978-3-030-99739-7},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-030-99739-7_52},
  pages     = {416--428},
  abstract  = {The fifth edition of the CheckThat! Lab is held as part of the 2022 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting various factuality tasks in seven languages: Arabic, Bulgarian, Dutch, English, German, Spanish, and Turkish. Task 1 focuses on disinformation related to the ongoing COVID-19 infodemic and politics, and asks to predict whether a tweet is worth fact-checking, contains a verifiable factual claim, is harmful to society, or is of interest to policy makers and why. Task 2 asks to retrieve claims that have been previously fact-checked and that could be useful to verify the claim in a tweet. Task 3 is to predict the veracity of a news article. Tasks 1 and 3 are classification problems, while Task 2 is a ranking one.},
  subject   = {Desinformation},
  language  = {en}
}

@inproceedings{NakovBarronCedenoDaSanMartinoetal2022Overview,
  author    = {Nakov, Preslav and Barr{\'o}n-Cede{\~n}o, Alberto and Da San Martino, Giovanni and Alam, Firoj and Struß, Julia Maria and Mandl, Thomas and M{\'i}guez, Rub{\'e}n and Caselli, Tommaso and Kutlu, Mucahid and Zaghouani, Wajdi and Li, Chengkai and Shaar, Shaden and Shahi, Gautam Kishore and Mubarak, Hamdy and Nikolov, Alex and Babulkov, Nikolay and Kartal, Yavuz Selim and Wiegand, Michael and Siegel, Melanie and K{\"o}hler, Juliane},
  title     = {Overview of the CLEF-2022 CheckThat! Lab on Fighting the COVID-19 Infodemic and Fake News Detection},
  series    = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022},
  booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022},
  publisher = {Springer},
  address   = {Cham},
  year      = {2022},
  isbn      = {978-3-031-13643-6},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-031-13643-6_29},
  pages     = {495--520},
  abstract  = {We describe the fifth edition of the CheckThat! lab, part of the 2022 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting tasks related to factuality in multiple languages: Arabic, Bulgarian, Dutch, English, German, Spanish, and Turkish. Task 1 asks to identify relevant claims in tweets in terms of check-worthiness, verifiability, harmfulness, and attention-worthiness. Task 2 asks to detect previously fact-checked claims that could be relevant to fact-check a new claim. It targets both tweets and political debates/speeches. Task 3 asks to predict the veracity of the main claim in a news article. CheckThat! was the most popular lab at CLEF-2022 in terms of team registrations: 137 teams. More than one-third (37\%) of them actually participated: 18, 7, and 26 teams submitted 210, 37, and 126 official runs for tasks 1, 2, and 3, respectively.},
  subject   = {Desinformation},
  language  = {en}
}

@inproceedings{BarronCedenoAlamCasellietal2023,
  author    = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Caselli, Tommaso and Da San Martino, Giovanni and Elsayed, Tamer and Galassi, Andrea and Haouari, Fatima and Ruggeri, Federico and Struß, Julia Maria and Nandi, Rabindra Nath and Cheema, Gullal S. and Azizov, Dilshod and Nakov, Preslav},
  title     = {The CLEF-2023 CheckThat! Lab: Checkworthiness, Subjectivity, Political Bias, Factuality, and Authority},
  series    = {Advances in Information Retrieval: 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2-6, 2023, Proceedings, Part III},
  booktitle = {Advances in Information Retrieval: 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2-6, 2023, Proceedings, Part III},
  publisher = {Springer},
  address   = {Cham},
  year      = {2023},
  isbn      = {978-3-031-28241-6},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-031-28241-6_59},
  pages     = {509--517},
  abstract  = {The five editions of the CheckThat! lab so far have focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. The 2023 edition of the lab zooms into some of the problems and - for the first time - it offers five tasks in seven languages (Arabic, Dutch, English, German, Italian, Spanish, and Turkish): Task 1 asks to determine whether an item, text or a text plus an image, is check-worthy; Task 2 requires to assess whether a text snippet is subjective or not; Task 3 looks for estimating the political bias of a document or a news outlet; Task 4 requires to determine the level of factuality of a document or a news outlet; and Task 5 is about identifying authorities that should be trusted to verify a contended claim.},
  subject   = {Desinformation},
  language  = {en}
}