@inproceedings{StrussMihaljevicDriesen, author = {Struß, Julia Maria and Mihaljević, Helena and Driesen, Maria}, title = {Where Is All the Data Hiding?}, series = {Datenstr{\"o}me und Kulturoasen - Die Informationswissenschaft als Bindeglied zwischen den Informationswelten : Proceedings des 18. internationalen Symposiums f{\"u}r Informationswissenschaft (ISI 2025)}, booktitle = {Datenstr{\"o}me und Kulturoasen - Die Informationswissenschaft als Bindeglied zwischen den Informationswelten : Proceedings des 18. internationalen Symposiums f{\"u}r Informationswissenschaft (ISI 2025)}, editor = {Eibl, Maximilian}, publisher = {Verlag Werner H{\"u}lsbusch}, address = {Boizenburg}, isbn = {978-3-86488-207-4}, issn = {0938-8710}, doi = {10.5281/zenodo.14925654}, pages = {190 -- 203}, abstract = {The paper presents first results of a systematic review of datasets created to address the rise of hate speech and related phenomena in online communication, with a specific focus on the hosting and licensing practices for these datasets. The analysis shows that while the majority of the 133 datasets have been directly accessible, over half have not been published via dedicated research data repositories, instead being hosted on GitHub. Additionally, almost 45\% of the datasets have been released without any accompanying license. Contrary to expectations, the use of licenses and dedicated repositories has not increased over time. Furthermore, while datasets representing languages from the Global South were somewhat less frequently licensed and hosted on dedicated repositories, this trend was not statistically significant. Similarly, the data source itself had little measurable influence on licensing or hosting choices.}, subject = {FAIR data principles}, language = {en} } @inproceedings{KoehlerStrussDriesen, author = {K{\"o}hler, Juliane and Struß, Julia Maria and Driesen, Maria}, title = {Seeking for Information about Environmental Sustainability}, series = {Nachhaltige Information - Information f{\"u}r Nachhaltigkeit}, booktitle = {Nachhaltige Information - Information f{\"u}r Nachhaltigkeit}, editor = {Semar, Wolfgang}, publisher = {Verlag Werner H{\"u}lsbusch}, address = {Gl{\"u}ckstadt}, doi = {10.5281/zenodo.10009338}, pages = {404 -- 410}, abstract = {In a time of climate change and growing activism, understanding how people access information is crucial. This research arises from ongoing government discussions on ecological policies and recent debates in Germany about ecological heating regulations and their cost implications. This study specifically focuses on information-seeking behavior on topics related to environmental sustainability. After reviewing existing research on information behavior, risk perception, and climate change communication, we investigate how participants search for information by surveying students.
The findings could inform strategies for sharing information and promoting action in Germany's context of climate change and sustainability.}, subject = {Nachhaltigkeit}, language = {en} } @inproceedings{StrussSiegelRuppenhoferetal., author = {Struß, Julia Maria and Siegel, Melanie and Ruppenhofer, Josef and Wiegand, Michael and Klenner, Manfred}, title = {Overview of GermEval Task 2, 2019 shared task on the identification of offensive language}, series = {Proceedings of the 15th Conference on Natural Language Processing (KONVENS 2019)}, booktitle = {Proceedings of the 15th Conference on Natural Language Processing (KONVENS 2019)}, publisher = {German Society for Computational Linguistics \& Language Technology and Friedrich-Alexander-Universit{\"a}t Erlangen-N{\"u}rnberg}, address = {M{\"u}nchen}, doi = {10.5167/uzh-178687}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:525-26209}, pages = {352 -- 363}, abstract = {We present the second edition of the GermEval Shared Task on the Identification of Offensive Language. This shared task deals with the classification of German tweets from Twitter. Two subtasks were continued from the first edition, namely a coarse-grained binary classification task and a fine-grained multi-class classification task. As a novel subtask, we introduce the classification of offensive tweets as explicit or implicit. The shared task had 13 participating groups submitting 28 runs for the coarse-grained task, another 28 runs for the fine-grained task, and 17 runs for the implicit-explicit task. We evaluate the results of the systems submitted to the shared task. The shared task homepage can be found at https://projects.fzai.h-da.de/iggsa/}, subject = {Automatische Spracherkennung}, language = {en} } @inproceedings{NakovDaSanMartinoElsayedetal., author = {Nakov, Preslav and Da San Martino, Giovanni and Elsayed, Tamer and Barr{\'o}n-Cede{\~n}o, Alberto and M{\'i}guez, Rub{\'e}n and Shaar, Shaden and Alam, Firoj and Haouari, Fatima and Hasanain, Maram and Babulkov, Nikolay and Nikolov, Alex and Shahi, Gautam Kishore and Struß, Julia Maria and Mandl, Thomas}, title = {The CLEF-2021 CheckThat! Lab on Detecting Check-Worthy Claims, Previously Fact-Checked Claims, and Fake News}, series = {Advances in Information Retrieval}, booktitle = {Advances in Information Retrieval}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-72240-1}, doi = {10.1007/978-3-030-72240-1_75}, pages = {639 -- 649}, abstract = {We describe the fourth edition of the CheckThat! Lab, part of the 2021 Cross-Language Evaluation Forum (CLEF). The lab evaluates technology supporting various tasks related to factuality, and it is offered in Arabic, Bulgarian, English, and Spanish. Task 1 asks to predict which tweets in a Twitter stream are worth fact-checking (focusing on COVID-19). Task 2 asks to determine whether a claim in a tweet can be verified using a set of previously fact-checked claims. Task 3 asks to predict the veracity of a target news article and its topical domain.
The evaluation is carried out using mean average precision or precision at rank k for the ranking tasks, and F1 for the classification tasks.}, subject = {Falschmeldung}, language = {en} } @inproceedings{NakovDaSanMartinoElsayedetal.Overview, author = {Nakov, Preslav and Da San Martino, Giovanni and Elsayed, Tamer and Barr{\'o}n-Cede{\~n}o, Alberto and M{\'i}guez, Rub{\'e}n and Shaar, Shaden and Alam, Firoj and Haouari, Fatima and Hasanain, Maram and Mansour, Watheq and Hamdan, Bayan and Sheikh Ali, Zien and Babulkov, Nikolay and Nikolov, Alex and Shahi, Gautam Kishore and Struß, Julia Maria and Mandl, Thomas and Kutlu, Mucahid and Kartal, Yavuz Selim}, title = {Overview of the CLEF-2021 CheckThat! Lab on Detecting Check-Worthy Claims, Previously Fact-Checked Claims, and Fake News}, series = {Experimental IR Meets Multilinguality, Multimodality, and Interaction}, booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-85251-1}, doi = {10.1007/978-3-030-85251-1_19}, pages = {264 -- 291}, abstract = {We describe the fourth edition of the CheckThat! Lab, part of the 2021 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting tasks related to factuality, and covers Arabic, Bulgarian, English, Spanish, and Turkish. Task 1 asks to predict which posts in a Twitter stream are worth fact-checking, focusing on COVID-19 and politics (in all five languages). Task 2 asks to determine whether a claim in a tweet can be verified using a set of previously fact-checked claims (in Arabic and English). Task 3 asks to predict the veracity of a news article and its topical domain (in English). The evaluation is based on mean average precision or precision at rank k for the ranking tasks, and macro-F1 for the classification tasks. This was the most popular CLEF-2021 lab in terms of team registrations: 132 teams. Nearly one-third of them participated: 15, 5, and 25 teams submitted official runs for tasks 1, 2, and 3, respectively.}, subject = {Desinformation}, language = {en} } @inproceedings{KastnerMandlStruss, author = {Kastner, Sebastian and Mandl, Thomas and Struß, Julia Maria}, title = {Identifikation von Kundenrezensionen im WWW als Basis eines Crawlers f{\"u}r das Opinion Mining}, series = {Informationswissenschaft zwischen virtueller Infrastruktur und materiellen Lebenswelten : Proceedings des 13. Internationalen Symposiums f{\"u}r Informationswissenschaft (ISI 2013), Potsdam, 19. bis 22. M{\"a}rz 2013}, booktitle = {Informationswissenschaft zwischen virtueller Infrastruktur und materiellen Lebenswelten : Proceedings des 13. Internationalen Symposiums f{\"u}r Informationswissenschaft (ISI 2013), Potsdam, 19. bis 22. M{\"a}rz 2013}, editor = {Hobohm, Hans-Christoph}, publisher = {Verlag Werner H{\"u}lsbusch}, address = {Gl{\"u}ckstadt}, isbn = {978-3-86488-035-3}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:525-4259}, abstract = {This work addresses the classification of web pages into those that contain customer reviews of products or services and those that do not. The reviews obtained in this way can then serve as input for opinion mining systems, which deal with the extraction and classification of opinions, e.g., in this type of text.
First evaluation results, with an accuracy of 91 percent, point to a promising approach.}, subject = {Klassifikation}, language = {de} } @inproceedings{KoehlerShahiStrussetal., author = {K{\"o}hler, Juliane and Shahi, Gautam Kishore and Struß, Julia Maria and Wiegand, Michael and Siegel, Melanie and Mandl, Thomas and Sch{\"u}tz, Mina}, title = {Overview of the CLEF-2022 CheckThat! Lab: Task 3 on Fake News Detection}, series = {CLEF 2022 Working Notes : Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum}, booktitle = {CLEF 2022 Working Notes : Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum}, editor = {Faggioli, Guglielmo and Ferro, Nicola and Hanbury, Allan and Potthast, Martin}, address = {Bologna}, organization = {University of Bologna}, issn = {1613-0073}, url = {http://nbn-resolving.de/urn:nbn:de:0074-3180-7}, pages = {404 -- 421}, abstract = {This paper describes the results of the CheckThat! Lab 2022 Task 3. This is the fifth edition of the lab, which concentrates on the evaluation of technologies supporting three tasks related to factuality. Task 3 is designed as a multi-class classification problem and focuses on the veracity of German and English news articles. The German subtask was meant to be solved using a cross-lingual approach, while the English subtask was offered as a mono-lingual task. The participants of the lab were provided with an English training, development, and test dataset as well as a German test dataset. In total, 25 teams submitted successful runs for the English subtask and 8 for the German subtask. The best performing system for the mono-lingual subtask achieved a macro F1-score of 0.339. The best system for the cross-lingual task achieved a macro F1-score of 0.242. In the paper at hand, we elaborate on the data collection process, the task setup, and the evaluation results, and give a brief overview of the participating systems.}, subject = {Deep Learning}, language = {en} } @inproceedings{StrussRuggeriBarronCedenoetal., author = {Struß, Julia Maria and Ruggeri, Federico and Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Dimitrov, Dimitar and Galassi, Andrea and Pachov, Georgi and Koychev, Ivan and Nakov, Preslav and Siegel, Melanie and Wiegand, Michael and Hasanain, Maram and Suwaileh, Reem and Zaghouani, Wajdi}, title = {Overview of the CLEF-2024 CheckThat! Lab Task 2 on Subjectivity in News Articles}, series = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)}, booktitle = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)}, editor = {Faggioli, Guglielmo and Ferro, Nicola and Galušč{\'a}kov{\'a}, Petra and Garc{\'i}a Seco de Herrera, Alba}, address = {France}, organization = {University of Grenoble Alpes}, issn = {1613-0073}, url = {http://nbn-resolving.de/urn:nbn:de:0074-3740-3}, pages = {287 -- 298}, abstract = {We present an overview of Task 2 of the seventh edition of the CheckThat! lab at the 2024 iteration of the Conference and Labs of the Evaluation Forum (CLEF). The task focuses on subjectivity detection in news articles and was offered in five languages: Arabic, Bulgarian, English, German, and Italian, as well as in a multilingual setting. The datasets for each language were carefully curated and annotated, comprising over 10,000 sentences from news articles.
The task challenged participants to develop systems capable of distinguishing between subjective statements (reflecting personal opinions or biases) and objective ones (presenting factual information) at the sentence level. A total of 15 teams participated in the task, submitting 36 valid runs across all language tracks. The participants used a variety of approaches, with transformer-based models being the most popular choice. Strategies included fine-tuning monolingual and multilingual models, and leveraging English models with automatic translation for the non-English datasets. Some teams also explored ensembles, feature engineering, and innovative techniques such as few-shot learning and in-context learning with large language models. The evaluation was based on macro-averaged F1 score. The results varied across languages, with the best performance achieved for Italian and German, followed by English. The Arabic track proved particularly challenging, with no team surpassing an F1 score of 0.50. This task contributes to the broader goal of enhancing the reliability of automated content analysis in the context of misinformation detection and fact-checking. The paper provides detailed insights into the datasets, participant approaches, and results, offering a benchmark for the current state of subjectivity detection across multiple languages.}, subject = {Fehlinformation}, language = {en} } @inproceedings{BarronCedenoAlamChakrabortyetal., author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Chakraborty, Tonmoy and Elsayed, Tamer and Nakov, Preslav and Przybyła, Piotr and Struß, Julia Maria and Haouari, Fatima and Hasanain, Maram and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem}, title = {The CLEF-2024 CheckThat! Lab}, series = {Advances in Information Retrieval}, booktitle = {Advances in Information Retrieval}, editor = {Goharian, Nazli and Tonellotto, Nicola and He, Yulan and Lipani, Aldo and McDonald, Graham and Macdonald, Craig and Ounis, Iadh}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {978-3-031-56069-9}, issn = {0302-9743}, doi = {10.1007/978-3-031-56069-9_62}, pages = {449 -- 458}, abstract = {The first five editions of the CheckThat! lab focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. Since the 2023 edition, it has been focusing on new problems that can support research and decision making during the verification process. In this new edition, we focus on new problems and - for the first time - we propose six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1 estimation of check-worthiness (the only task that has been present in all CheckThat! editions), Task 2 identification of subjectivity (a follow-up of the CheckThat! 2023 edition), Task 3 identification of persuasion (a follow-up of SemEval 2023), Task 4 detection of hero, villain, and victim from memes (a follow-up of CONSTRAINT 2022), Task 5 Rumor Verification using Evidence from Authorities (a first), and Task 6 robustness of credibility assessment with adversarial examples (a first).
These tasks represent challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings.}, subject = {Desinformation}, language = {en} } @inproceedings{BarronCedenoAlamCasellietal., author = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Caselli, Tommaso and Da San Martino, Giovanni and Elsayed, Tamer and Galassi, Andrea and Haouari, Fatima and Ruggeri, Federico and Struß, Julia Maria and Nath Nandi, Rabindra and Cheema, Gullal S. and Azizov, Dilshod and Nakov, Preslav}, title = {The CLEF-2023 CheckThat! Lab: Checkworthiness, Subjectivity, Political Bias, Factuality, and Authority}, series = {Advances in Information Retrieval : 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2-6, 2023, Proceedings, Part III}, booktitle = {Advances in Information Retrieval : 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2-6, 2023, Proceedings, Part III}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-28241-6}, issn = {0302-9743}, doi = {10.1007/978-3-031-28241-6_59}, pages = {509 -- 517}, abstract = {The five editions of the CheckThat! lab so far have focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. The 2023 edition of the lab zooms into some of the problems and - for the first time - it offers five tasks in seven languages (Arabic, Dutch, English, German, Italian, Spanish, and Turkish): Task 1 asks to determine whether an item, text or a text plus an image, is check-worthy; Task 2 requires assessing whether a text snippet is subjective or not; Task 3 aims at estimating the political bias of a document or a news outlet; Task 4 requires determining the level of factuality of a document or a news outlet; and Task 5 is about identifying authorities that should be trusted to verify a contended claim.}, subject = {Desinformation}, language = {en} }
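Several of the CheckThat! abstracts above evaluate ranking tasks with mean average precision or precision at rank k, and classification tasks with (macro-averaged) F1. As a reading aid, the following is a sketch of these measures in their standard form; the exact per-task variants are defined in the cited overview papers, and the notation (rel_i for binary relevance at rank i, R for the number of relevant items, n for the number of retrieved items, Q for the query set, C for the class set) follows common IR usage rather than any single paper's definitions. BibTeX ignores text outside of entries, so this note does not affect processing of the file.
\[
P@k = \frac{1}{k}\sum_{i=1}^{k} \mathrm{rel}_i, \qquad
\mathrm{AP} = \frac{1}{R}\sum_{k=1}^{n} \big(P@k \cdot \mathrm{rel}_k\big), \qquad
\mathrm{MAP} = \frac{1}{|Q|}\sum_{q \in Q} \mathrm{AP}_q
\]
\[
F_{1,c} = \frac{2\,P_c R_c}{P_c + R_c}, \qquad
\text{macro-}F_1 = \frac{1}{|C|}\sum_{c \in C} F_{1,c}
\]
where P_c and R_c denote per-class precision and recall; macro-averaging weights all classes equally, which is why it is preferred for the imbalanced class distributions typical of the fake news and subjectivity tasks described above.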