@inproceedings{BarronCedenoAlamChakrabortyetal.,
  author    = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Chakraborty, Tanmoy and Elsayed, Tamer and Nakov, Preslav and Przyby{\l}a, Piotr and Stru{\ss}, Julia Maria and Haouari, Fatima and Hasanain, Maram and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem},
  title     = {The CLEF-2024 CheckThat! Lab},
  series    = {Advances in Information Retrieval},
  booktitle = {Advances in Information Retrieval},
  editor    = {Goharian, Nazli and Tonellotto, Nicola and He, Yulan and Lipani, Aldo and McDonald, Graham and Macdonald, Craig and Ounis, Iadh},
  publisher = {Springer International Publishing},
  address   = {Cham},
  year      = {2024},
  isbn      = {978-3-031-56069-9},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-031-56069-9_62},
  pages     = {449--458},
  abstract  = {The first five editions of the CheckThat! lab focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. Since the 2023 edition, it has focused on new problems that can support research and decision making during the verification process. In this new edition, we focus on new problems and, for the first time, we propose six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1 estimation of check-worthiness (the only task that has been present in all CheckThat! editions), Task 2 identification of subjectivity (a follow-up of the CheckThat! 2023 edition), Task 3 identification of persuasion (a follow-up of SemEval 2023), Task 4 detection of hero, villain, and victim from memes (a follow-up of CONSTRAINT 2022), Task 5 rumor verification using evidence from authorities (a first), and Task 6 robustness of credibility assessment with adversarial examples (a first). These tasks represent challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings.},
  subject   = {Disinformation},
  language  = {en}
}

@inproceedings{BarronCedenoAlamStrussetal.,
  author    = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Stru{\ss}, Julia Maria and Nakov, Preslav and Chakraborty, Tanmoy and Elsayed, Tamer and Przyby{\l}a, Piotr and Caselli, Tommaso and Da San Martino, Giovanni and Haouari, Fatima and Hasanain, Maram and Li, Chengkai and Piskorski, Jakub and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem},
  title     = {Overview of the CLEF-2024 CheckThat! Lab: Check-Worthiness, Subjectivity, Persuasion, Roles, Authorities, and Adversarial Robustness},
  series    = {Experimental IR Meets Multilinguality, Multimodality, and Interaction: 15th International Conference of the CLEF Association, CLEF 2024, Grenoble, France, September 9-12, 2024, Proceedings, Part II},
  booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction: 15th International Conference of the CLEF Association, CLEF 2024, Grenoble, France, September 9-12, 2024, Proceedings, Part II},
  editor    = {Goeuriot, Lorraine and Mulhem, Philippe and Qu{\'e}not, Georges and Schwab, Didier and Di Nunzio, Giorgio Maria and Soulier, Laure and Galu{\v{s}}{\v{c}}{\'a}kov{\'a}, Petra and Seco de Herrera, Alba Garc{\'i}a and Faggioli, Guglielmo and Ferro, Nicola},
  publisher = {Springer Nature},
  address   = {Berlin},
  year      = {2024},
  isbn      = {978-3-031-71907-3},
  doi       = {10.1007/978-3-031-71908-0},
  pages     = {28--52},
  abstract  = {We describe the seventh edition of the CheckThat! lab, part of the 2024 Conference and Labs of the Evaluation Forum (CLEF). Previous editions of CheckThat! focused on the main tasks of the information verification pipeline: check-worthiness, identifying previously fact-checked claims, supporting evidence retrieval, and claim verification. In this edition, we introduced some new challenges, offering six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1 on estimation of check-worthiness (the only task that has been present in all CheckThat! editions), Task 2 on identification of subjectivity (a follow-up of the CheckThat! 2023 edition), Task 3 on identification of the use of persuasion techniques (a follow-up of SemEval 2023), Task 4 on detection of hero, villain, and victim from memes (a follow-up of CONSTRAINT 2022), Task 5 on rumor verification using evidence from authorities (new task), and Task 6 on robustness of credibility assessment with adversarial examples (new task). These are challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings. This year, CheckThat! was one of the most popular labs at CLEF-2024 in terms of team registrations: 130 teams. More than one-third of them (a total of 46) actually participated.},
  subject   = {Disinformation},
  language  = {en}
}