@inproceedings{HigiNeurothStruss,
  author    = {Higi, Leonard and Neuroth, Heike and Stru{\ss}, Julia Maria},
  title     = {From Data Life Cycle to Urban Data Practice},
  series    = {Nachhaltige Information - Information f{\"u}r Nachhaltigkeit},
  booktitle = {Nachhaltige Information - Information f{\"u}r Nachhaltigkeit},
  editor    = {Semar, Wolfgang},
  publisher = {Verlag Werner H{\"u}lsbusch},
  address   = {Gl{\"u}ckstadt},
  isbn      = {978-3-86488-200-5},
  issn      = {0938-8710},
  doi       = {10.5281/zenodo.10009338},
  pages     = {393--400},
  abstract  = {This paper presents a project that aims to support municipal stakeholders in rural areas by developing data-based applications in a collaborative process between stakeholders and a transfer team. In order to foster knowledge transfer in the domains of data literacy and data management on multiple levels, a new approach is proposed that organizes the team's competencies along the data life cycle.},
  subject   = {Forschungsdaten},
  language  = {en}
}

@inproceedings{BarronCedenoAlamCasellietal.,
  author    = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Caselli, Tommaso and Da San Martino, Giovanni and Elsayed, Tamer and Galassi, Andrea and Haouari, Fatima and Ruggeri, Federico and Stru{\ss}, Julia Maria and Nath Nandi, Rabindra and Cheema, Gullal S. and Azizov, Dilshod and Nakov, Preslav},
  title     = {The CLEF-2023 CheckThat! Lab: Checkworthiness, Subjectivity, Political Bias, Factuality, and Authority},
  series    = {Advances in Information Retrieval: 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2--6, 2023, Proceedings, Part III},
  booktitle = {Advances in Information Retrieval: 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2--6, 2023, Proceedings, Part III},
  publisher = {Springer},
  address   = {Cham},
  isbn      = {978-3-031-28241-6},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-031-28241-6_59},
  pages     = {509--517},
  abstract  = {The five editions of the CheckThat! lab so far have focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. The 2023 edition of the lab zooms into some of these problems and, for the first time, offers five tasks in seven languages (Arabic, Dutch, English, German, Italian, Spanish, and Turkish): Task 1 asks to determine whether an item (a text, or a text plus an image) is check-worthy; Task 2 asks to assess whether a text snippet is subjective or not; Task 3 aims at estimating the political bias of a document or a news outlet; Task 4 asks to determine the level of factuality of a document or a news outlet; and Task 5 is about identifying authorities that should be trusted to verify a contended claim.},
  subject   = {Desinformation},
  language  = {en}
}

@inproceedings{BarronCedenoAlamChakrabortyetal.,
  author    = {Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Chakraborty, Tonmoy and Elsayed, Tamer and Nakov, Preslav and Przyby{\l}a, Piotr and Stru{\ss}, Julia Maria and Haouari, Fatima and Hasanain, Maram and Ruggeri, Federico and Song, Xingyi and Suwaileh, Reem},
  title     = {The CLEF-2024 CheckThat! Lab},
  series    = {Advances in Information Retrieval},
  booktitle = {Advances in Information Retrieval},
  editor    = {Goharian, Nazli and Tonellotto, Nicola and He, Yulan and Lipani, Aldo and McDonald, Graham and Macdonald, Craig and Ounis, Iadh},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {978-3-031-56069-9},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-031-56069-9_62},
  pages     = {449--458},
  abstract  = {The first five editions of the CheckThat! lab focused on the main tasks of the information verification pipeline: check-worthiness, evidence retrieval and pairing, and verification. Since the 2023 edition, the lab has been focusing on new problems that can support research and decision-making during the verification process. In this new edition, we focus on new problems and, for the first time, propose six tasks in fifteen languages (Arabic, Bulgarian, English, Dutch, French, Georgian, German, Greek, Italian, Polish, Portuguese, Russian, Slovene, Spanish, and code-mixed Hindi-English): Task 1, estimation of check-worthiness (the only task that has been present in all CheckThat! editions); Task 2, identification of subjectivity (a follow-up of the CheckThat! 2023 edition); Task 3, identification of persuasion (a follow-up of SemEval 2023); Task 4, detection of hero, villain, and victim from memes (a follow-up of CONSTRAINT 2022); Task 5, rumor verification using evidence from authorities (a first); and Task 6, robustness of credibility assessment with adversarial examples (a first). These tasks represent challenging classification and retrieval problems at the document and at the span level, including multilingual and multimodal settings.},
  subject   = {Desinformation},
  language  = {en}
}