@inproceedings{KoehlerShahiStrussetal.,
  author       = {K{\"o}hler, Juliane and Shahi, Gautam Kishore and Struß, Julia Maria and Wiegand, Michael and Siegel, Melanie and Mandl, Thomas and Sch{\"u}tz, Mina},
  title        = {Overview of the CLEF-2022 CheckThat! Lab: Task 3 on Fake News Detection},
  series       = {CLEF 2022 Working Notes : Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum},
  booktitle    = {CLEF 2022 Working Notes : Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum},
  editor       = {Faggioli, Guglielmo and Ferro, Nicola and Hanbury, Allan and Potthast, Martin},
  address      = {Bologna},
  organization = {University of Bologna},
  issn         = {1613-0073},
  url          = {http://nbn-resolving.de/urn:nbn:de:0074-3180-7},
  pages        = {404 -- 421},
  abstract     = {This paper describes the results of the CheckThat! Lab 2022 Task 3. This is the fifth edition of the lab, which concentrates on the evaluation of technologies supporting three tasks related to factuality. Task 3 is designed as a multi-class classification problem and focuses on the veracity of German and English news articles. The German subtask was to be solved using a cross-lingual approach, while the English subtask was offered as a mono-lingual task. The participants of the lab were provided with an English training, development, and test dataset as well as a German test dataset. In total, 25 teams submitted successful runs for the English subtask and 8 for the German subtask. The best-performing system for the mono-lingual subtask achieved a macro F1-score of 0.339. The best system for the cross-lingual task achieved a macro F1-score of 0.242. In this paper, we elaborate on the data collection process, the task setup, and the evaluation results, and give a brief overview of the participating systems.},
  subject      = {Deep Learning},
  language     = {en}
}

@inproceedings{GalassiRuggeriBarronCedenoetal.,
  author       = {Galassi, Andrea and Ruggeri, Federico and Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Caselli, Tommaso and Kutlu, Mucahid and Struß, Julia Maria and Antici, Francesco and Hasanain, Maram and K{\"o}hler, Juliane and Korre, Katerina and Leistra, Folkert and Muti, Arianna and Siegel, Melanie and T{\"u}rkmen, Mehmet Deniz and Wiegand, Michael and Zaghouani, Wajdi},
  title        = {Overview of the CLEF-2023 CheckThat! Lab: Task 2 on Subjectivity in News Articles},
  series       = {CLEF 2023 Working Notes},
  booktitle    = {CLEF 2023 Working Notes},
  editor       = {Aliannejadi, Mohammad and Faggioli, Guglielmo and Ferro, Nicola and Vlachos, Michalis},
  address      = {Thessaloniki},
  organization = {Centre for Research and Technology Hellas},
  pages        = {236 -- 249},
  abstract     = {We describe the outcome of the 2023 edition of the CheckThat! Lab at CLEF. We focus on subjectivity (Task 2), which has been proposed for the first time. It aims to foster technology for the identification of subjective text fragments in news articles. To this end, we produced corpora consisting of 9,530 manually annotated sentences covering six languages: Arabic, Dutch, English, German, Italian, and Turkish. Task 2 attracted 12 teams, which submitted a total of 40 final runs covering all languages. The most successful approaches addressed the task using state-of-the-art multilingual transformer models, which were fine-tuned on language-specific data.
Teams also experimented with a rich set of other neural architectures, including foundation models, zero-shot classifiers, and standard transformers, mainly coupled with data augmentation and multilingual training strategies to address class imbalance. We publicly release all the datasets and evaluation scripts to promote further research on this topic.},
  subject      = {Desinformation},
  language     = {en}
}

@inproceedings{KoehlerStrussDriesen,
  author       = {K{\"o}hler, Juliane and Struß, Julia Maria and Driesen, Maria},
  title        = {Seeking for Information about Environmental Sustainability},
  series       = {Nachhaltige Information - Information f{\"u}r Nachhaltigkeit},
  booktitle    = {Nachhaltige Information - Information f{\"u}r Nachhaltigkeit},
  editor       = {Semar, Wolfgang},
  publisher    = {Verlag Werner H{\"u}lsbusch},
  address      = {Gl{\"u}ckstadt},
  doi          = {10.5281/zenodo.10009338},
  pages        = {404 -- 410},
  abstract     = {In a time of climate change and growing activism, understanding how people access information is crucial. This research arises from ongoing government discussions on ecological policies and recent debates in Germany about ecological heating regulations and their cost implications. This study focuses specifically on information-seeking behavior on topics related to environmental sustainability. After reviewing existing research on information behavior, risk perception, and climate change communication, we investigate how participants search for information by surveying students. The findings could inform strategies for sharing information and promoting action in Germany's context of climate change and sustainability.},
  subject      = {Nachhaltigkeit},
  language     = {en}
}

@inproceedings{NakovBarronCedenoDaSanMartinoetal.,
  author       = {Nakov, Preslav and Barr{\'o}n-Cede{\~n}o, Alberto and Da San Martino, Giovanni and Alam, Firoj and Struß, Julia Maria and Mandl, Thomas and M{\'i}guez, Rub{\'e}n and Caselli, Tommaso and Kutlu, Mucahid and Zaghouani, Wajdi and Li, Chengkai and Shaar, Shaden and Shahi, Gautam Kishore and Mubarak, Hamdy and Nikolov, Alex and Babulkov, Nikolay and Kartal, Yavuz Selim and Wiegand, Michael and Siegel, Melanie and K{\"o}hler, Juliane},
  title        = {Overview of the CLEF-2022 CheckThat! Lab on Fighting the COVID-19 Infodemic and Fake News Detection},
  series       = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022.},
  booktitle    = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022.},
  publisher    = {Springer},
  address      = {Cham},
  isbn         = {978-3-031-13643-6},
  issn         = {0302-9743},
  doi          = {10.1007/978-3-031-13643-6_29},
  pages        = {495 -- 520},
  abstract     = {We describe the fifth edition of the CheckThat! lab, part of the 2022 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting tasks related to factuality in multiple languages: Arabic, Bulgarian, Dutch, English, German, Spanish, and Turkish. Task 1 asks to identify relevant claims in tweets in terms of check-worthiness, verifiability, harmfulness, and attention-worthiness. Task 2 asks to detect previously fact-checked claims that could be relevant to fact-check a new claim. It targets both tweets and political debates/speeches. Task 3 asks to predict the veracity of the main claim in a news article. CheckThat! was the most popular lab at CLEF-2022 in terms of team registrations: 137 teams.
More than one-third (37\%) of them actually participated: 18, 7, and 26 teams submitted 210, 37, and 126 official runs for tasks 1, 2, and 3, respectively.},
  subject      = {Desinformation},
  language     = {en}
}