@inproceedings{StrussRuggeriBarronCedenoetal.,
  author = {Struß, Julia Maria and Ruggeri, Federico and Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Dimitrov, Dimitar and Galassi, Andrea and Pachov, Georgi and Koychev, Ivan and Nakov, Preslav and Siegel, Melanie and Wiegand, Michael and Hasanain, Maram and Suwaileh, Reem and Zaghouani, Wajdi},
  title = {Overview of the CLEF-2024 CheckThat! Lab Task 2 on Subjectivity in News Articles},
  series = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)},
  booktitle = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)},
  editor = {Faggioli, Guglielmo and Ferro, Nicola and Galušč{\'a}kov{\'a}, Petra and Seco de Herrera, Alba Garc{\'i}a},
  address = {France},
  organization = {University of Grenoble Alpes},
  year = {2024},
  issn = {1613-0073},
  url = {http://nbn-resolving.de/urn:nbn:de:0074-3740-3},
  pages = {287 -- 298},
  abstract = {We present an overview of Task 2 of the seventh edition of the CheckThat! lab at the 2024 iteration of the Conference and Labs of the Evaluation Forum (CLEF). The task focuses on subjectivity detection in news articles and was offered in five languages: Arabic, Bulgarian, English, German, and Italian, as well as in a multilingual setting. The datasets for each language were carefully curated and annotated, comprising over 10,000 sentences from news articles. The task challenged participants to develop systems capable of distinguishing between subjective statements (reflecting personal opinions or biases) and objective ones (presenting factual information) at the sentence level. A total of 15 teams participated in the task, submitting 36 valid runs across all language tracks. The participants used a variety of approaches, with transformer-based models being the most popular choice. Strategies included fine-tuning monolingual and multilingual models, and leveraging English models with automatic translation for the non-English datasets. Some teams also explored ensembles, feature engineering, and innovative techniques such as few-shot learning and in-context learning with large language models. The evaluation was based on macro-averaged F1 score. The results varied across languages, with the best performance achieved for Italian and German, followed by English. The Arabic track proved particularly challenging, with no team surpassing an F1 score of 0.50. This task contributes to the broader goal of enhancing the reliability of automated content analysis in the context of misinformation detection and fact-checking. The paper provides detailed insights into the datasets, participant approaches, and results, offering a benchmark for the current state of subjectivity detection across multiple languages.},
  subject = {Misinformation},
  language = {en}
}

@inproceedings{BarronCedenoAlamChakrabortyetal.,
  author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Chakraborty, Tanmoy and Elsayed, Tamer and Nakov, Preslav and Przybyła, Piotr and Struß, Julia Maria and Haouari, Fatima and Hasanain, Maram and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem},
  title = {The CLEF-2024 CheckThat! Lab},
  series = {Advances in Information Retrieval},
  booktitle = {Advances in Information Retrieval},
  editor = {Goharian, Nazli and Tonellotto, Nicola and He, Yulan and Lipani, Aldo and McDonald, Graham and Macdonald, Craig and Ounis, Iadh},
  publisher = {Springer International Publishing},
  address = {Cham},
  year = {2024},
  isbn = {978-3-031-56069-9},
  issn = {0302-9743},
  doi = {10.1007/978-3-031-56069-9_62},
  pages = {449 -- 458},
  abstract = {The first five editions of the CheckThat! lab focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. Since the 2023 edition, it has been focusing on new problems that can support the research and decision making during the verification process. In this new edition, we focus on new problems and -for the first time- we propose six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1 estimation of check-worthiness (the only task that has been present in all CheckThat! editions), Task 2 identification of subjectivity (a follow up of CheckThat! 2023 edition), Task 3 identification of persuasion (a follow up of SemEval 2023), Task 4 detection of hero, villain, and victim from memes (a follow up of CONSTRAINT 2022), Task 5 Rumor Verification using Evidence from Authorities (a first), and Task 6 robustness of credibility assessment with adversarial examples (a first). These tasks represent challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings.},
  subject = {Disinformation},
  language = {en}
}

@inproceedings{BarronCedenoAlamStrussetal.,
  author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Struß, Julia Maria and Nakov, Preslav and Chakraborty, Tanmoy and Elsayed, Tamer and Przybyła, Piotr and Caselli, Tommaso and Da San Martino, Giovanni and Haouari, Fatima and Hasanain, Maram and Li, Chengkai and Piskorski, Jakub and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem},
  title = {Overview of the CLEF-2024 CheckThat! Lab : Check-Worthiness, Subjectivity, Persuasion, Roles, Authorities, and Adversarial Robustness},
  series = {Experimental IR Meets Multilinguality, Multimodality, and Interaction : 15th International Conference of the CLEF Association, CLEF 2024, Grenoble, France, September 9-12, 2024, Proceedings, Part II},
  booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction : 15th International Conference of the CLEF Association, CLEF 2024, Grenoble, France, September 9-12, 2024, Proceedings, Part II},
  editor = {Goeuriot, Lorraine and Mulhem, Philippe and Qu{\'e}not, Georges and Schwab, Didier and Di Nunzio, Giorgio Maria and Soulier, Laure and Galušč{\'a}kov{\'a}, Petra and Seco de Herrera, Alba Garc{\'i}a and Faggioli, Guglielmo and Ferro, Nicola},
  publisher = {Springer Nature},
  address = {Berlin},
  year = {2024},
  isbn = {978-3-031-71907-3},
  doi = {10.1007/978-3-031-71908-0},
  pages = {28 -- 52},
  abstract = {We describe the seventh edition of the CheckThat! lab, part of the 2024 Conference and Labs of the Evaluation Forum (CLEF). Previous editions of CheckThat! focused on the main tasks of the information verification pipeline: check-worthiness, identifying previously fact-checked claims, supporting evidence retrieval, and claim verification.
In this edition, we introduced some new challenges, offering six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1 on estimation of check-worthiness (the only task that has been present in all CheckThat! editions), Task 2 on identification of subjectivity (a follow up of the CheckThat! 2023 edition), Task 3 on identification of the use of persuasion techniques (a follow up of SemEval 2023), Task 4 on detection of hero, villain, and victim from memes (a follow up of CONSTRAINT 2022), Task 5 on rumor verification using evidence from authorities (new task), and Task 6 on robustness of credibility assessment with adversarial examples (new task). These are challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings. This year, CheckThat! was one of the most popular labs at CLEF-2024 in terms of team registrations: 130 teams. More than one-third of them (a total of 46) actually participated.},
  subject = {Disinformation},
  language = {en}
}