@article{HenkelWiensHuberetal., author = {Henkel, Miriam and Wiens, Marleen and Huber, Dorothea and Staats, Hermann and Taubner, Svenja and Wiegand-Grefe, Silke and Buchholz, Michael B. and Frommer, J{\"o}rg and Benecke, Cord}, title = {Was berichten Patienten und Therapeuten {\"u}ber psychoanalytische Langzeittherapie?}, series = {Psychotherapeut}, volume = {61}, journal = {Psychotherapeut}, publisher = {Springer}, address = {Berlin}, issn = {0935-6185}, doi = {10.1007/s00278-016-0141-0}, pages = {484 -- 490}, abstract = {Background: Analytic psychotherapies (AP) account for only a minority of treatments in statutory health insurance care. Moreover, approved AP are often shorter and held less frequently per week than the psychotherapy guidelines provide for. In the psychoanalytic context, patients' subjective experience is of interest alongside conventional questionnaire-based psychotherapy outcome research. Objective: We examine which setting therapists report for AP and what patients report about their completed therapies. Material and methods: Data from already completed AP in the ongoing DPG practice study (a prospective naturalistic study investigating the effectiveness of psychoanalytically based psychotherapies) are analyzed. On the therapist side (n = 108), questionnaire responses are examined; on the patient side, patients' free-text reports (n = 37) are categorized using qualitative content analysis. Results: In long-term therapies (LZT) of the DPG practice study, the session quota allowed by the psychotherapy guidelines is frequently used in full, and AP in the first year are often conducted with the patient lying on the couch and with 2 to 3 sessions per week. The analysis of the patient reports shows, among other things, that patients frequently experience their treatments as strenuous but are satisfied with them overall. Conclusion: Analytic psychotherapy is, at least in this study, very often carried out in accordance with the guidelines. The patient reports offer insight that is close to the patients' experience and thus opens up a promising research perspective.}, subject = {Analytische Therapie}, language = {de} } @inproceedings{StrussSiegelRuppenhoferetal., author = {Struß, Julia Maria and Siegel, Melanie and Ruppenhofer, Josef and Wiegand, Michael and Klenner, Manfred}, title = {Overview of GermEval Task 2, 2019 shared task on the identification of offensive language}, series = {Proceedings of the 15th Conference on Natural Language Processing (KONVENS 2019)}, booktitle = {Proceedings of the 15th Conference on Natural Language Processing (KONVENS 2019)}, publisher = {German Society for Computational Linguistics \& Language Technology and Friedrich-Alexander-Universit{\"a}t Erlangen-N{\"u}rnberg}, address = {M{\"u}nchen}, doi = {10.5167/uzh-178687}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:525-26209}, pages = {352 -- 363}, abstract = {We present the second edition of the GermEval Shared Task on the Identification of Offensive Language. This shared task deals with the classification of German tweets from Twitter. Two subtasks were continued from the first edition, namely a coarse-grained binary classification task and a fine-grained multi-class classification task. As a novel subtask, we introduce the classification of offensive tweets as explicit or implicit.
The shared task had 13 participating groups submitting 28 runs for the coarse-grained task, another 28 runs for the fine-grained task, and 17 runs for the implicit-explicit task. We evaluate the results of the systems submitted to the shared task. The shared task homepage can be found at https://projects.fzai.h-da.de/iggsa/}, subject = {Automatische Spracherkennung}, language = {en} } @inproceedings{KoehlerShahiStrussetal., author = {K{\"o}hler, Juliane and Shahi, Gautam Kishore and Struß, Julia Maria and Wiegand, Michael and Siegel, Melanie and Mandl, Thomas and Sch{\"u}tz, Mina}, title = {Overview of the CLEF-2022 CheckThat! Lab: Task 3 on Fake News Detection}, series = {CLEF 2022 Working Notes : Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum}, booktitle = {CLEF 2022 Working Notes : Proceedings of the Working Notes of CLEF 2022 - Conference and Labs of the Evaluation Forum}, editor = {Faggioli, Guglielmo and Ferro, Nicola and Hanbury, Allan and Potthast, Martin}, address = {Bologna}, organization = {University of Bologna}, issn = {1613-0073}, url = {http://nbn-resolving.de/urn:nbn:de:0074-3180-7}, pages = {404 -- 421}, abstract = {This paper describes the results of the CheckThat! Lab 2022 Task 3. This is the fifth edition of the lab, which concentrates on the evaluation of technologies supporting three tasks related to factuality. Task 3 is designed as a multi-class classification problem and focuses on the veracity of German and English news articles. The German subtask was to be solved using a cross-lingual approach, while the English subtask was offered as a monolingual task. The participants were provided with an English training, development, and test dataset as well as a German test dataset. In total, 25 teams submitted successful runs for the English subtask and 8 for the German subtask. The best-performing system for the monolingual subtask achieved a macro F1-score of 0.339. The best system for the cross-lingual task achieved a macro F1-score of 0.242. In this paper, we elaborate on the data collection process, the task setup, and the evaluation results, and give a brief overview of the participating systems.}, subject = {Deep Learning}, language = {en} } @inproceedings{StrussRuggeriBarronCedenoetal., author = {Struß, Julia Maria and Ruggeri, Federico and Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Dimitrov, Dimitar and Galassi, Andrea and Pachov, Georgi and Koychev, Ivan and Nakov, Preslav and Siegel, Melanie and Wiegand, Michael and Hasanain, Maram and Suwaileh, Reem and Zaghouani, Wajdi}, title = {Overview of the CLEF-2024 CheckThat! Lab Task 2 on Subjectivity in News Articles}, series = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)}, booktitle = {CLEF 2024 Working Notes : Working Notes of the Conference and Labs of the Evaluation Forum (CLEF 2024)}, editor = {Faggioli, Guglielmo and Ferro, Nicola and Galušč{\'a}kov{\'a}, Petra and Seco de Herrera, Alba Garc{\'i}a}, address = {France}, organization = {University of Grenoble Alpes}, issn = {1613-0073}, url = {http://nbn-resolving.de/urn:nbn:de:0074-3740-3}, pages = {287 -- 298}, abstract = {We present an overview of Task 2 of the seventh edition of the CheckThat! lab at the 2024 iteration of the Conference and Labs of the Evaluation Forum (CLEF).
The task focuses on subjectivity detection in news articles and was offered in five languages: Arabic, Bulgarian, English, German, and Italian, as well as in a multilingual setting. The datasets for each language were carefully curated and annotated, comprising over 10,000 sentences from news articles. The task challenged participants to develop systems capable of distinguishing between subjective statements (reflecting personal opinions or biases) and objective ones (presenting factual information) at the sentence level. A total of 15 teams participated in the task, submitting 36 valid runs across all language tracks. The participants used a variety of approaches, with transformer-based models being the most popular choice. Strategies included fine-tuning monolingual and multilingual models, and leveraging English models with automatic translation for the non-English datasets. Some teams also explored ensembles, feature engineering, and innovative techniques such as few-shot learning and in-context learning with large language models. The evaluation was based on macro-averaged F1 score. The results varied across languages, with the best performance achieved for Italian and German, followed by English. The Arabic track proved particularly challenging, with no team surpassing an F1 score of 0.50. This task contributes to the broader goal of enhancing the reliability of automated content analysis in the context of misinformation detection and fact-checking. The paper provides detailed insights into the datasets, participant approaches, and results, offering a benchmark for the current state of subjectivity detection across multiple languages.}, subject = {Fehlinformation}, language = {en} } @inproceedings{GalassiRuggeriBarronCedenoetal., author = {Galassi, Andrea and Ruggeri, Federico and Barr{\'o}n-Cede{\~n}o, Alberto and Alam, Firoj and Caselli, Tommaso and Kutlu, Mucahid and Struß, Julia Maria and Antici, Francesco and Hasanain, Maram and K{\"o}hler, Juliane and Korre, Katerina and Leistra, Folkert and Muti, Arianna and Siegel, Melanie and T{\"u}rkmen, Mehmet Deniz and Wiegand, Michael and Zaghouani, Wajdi}, title = {Overview of the CLEF-2023 CheckThat! Lab: Task 2 on Subjectivity in News Articles}, series = {CLEF 2023 Working Notes}, booktitle = {CLEF 2023 Working Notes}, editor = {Aliannejadi, Mohammad and Faggioli, Guglielmo and Ferro, Nicola and Vlachos, Michalis}, address = {Thessaloniki}, organization = {Centre for Research and Technology Hellas}, pages = {236 -- 249}, abstract = {We describe the outcome of the 2023 edition of the CheckThat! Lab at CLEF. We focus on subjectivity (Task 2), which was proposed for the first time. It aims at fostering technology for the identification of subjective text fragments in news articles. For that, we produced corpora consisting of 9,530 manually annotated sentences, covering six languages: Arabic, Dutch, English, German, Italian, and Turkish. Task 2 attracted 12 teams, which submitted a total of 40 final runs covering all languages. The most successful approaches addressed the task using state-of-the-art multilingual transformer models, which were fine-tuned on language-specific data. Teams also experimented with a rich set of other neural architectures, including foundation models, zero-shot classifiers, and standard transformers, mainly coupled with data augmentation and multilingual training strategies to address class imbalance.
We publicly release all the datasets and evaluation scripts to promote further research on this topic.}, subject = {Desinformation}, language = {en} } @inproceedings{NakovBarronCedenoDaSanMartinoetal., author = {Nakov, Preslav and Barr{\'o}n-Cede{\~n}o, Alberto and Da San Martino, Giovanni and Alam, Firoj and Struß, Julia Maria and Mandl, Thomas and M{\'i}guez, Rub{\'e}n and Caselli, Tommaso and Kutlu, Mucahid and Zaghouani, Wajdi and Li, Chengkai and Shaar, Shaden and Shahi, Gautam Kishore and Mubarak, Hamdy and Nikolov, Alex and Babulkov, Nikolay and Kartal, Yavuz Selim and Wiegand, Michael and Siegel, Melanie and K{\"o}hler, Juliane}, title = {Overview of the CLEF-2022 CheckThat! Lab on Fighting the COVID-19 Infodemic and Fake News Detection}, series = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022.}, booktitle = {Experimental IR Meets Multilinguality, Multimodality, and Interaction. CLEF 2022.}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-13643-6}, issn = {0302-9743}, doi = {10.1007/978-3-031-13643-6_29}, pages = {495 -- 520}, abstract = {We describe the fifth edition of the CheckThat! lab, part of the 2022 Conference and Labs of the Evaluation Forum (CLEF). The lab evaluates technology supporting tasks related to factuality in multiple languages: Arabic, Bulgarian, Dutch, English, German, Spanish, and Turkish. Task 1 asks to identify relevant claims in tweets in terms of check-worthiness, verifiability, harmfulness, and attention-worthiness. Task 2 asks to detect previously fact-checked claims that could be relevant to fact-check a new claim. It targets both tweets and political debates/speeches. Task 3 asks to predict the veracity of the main claim in a news article. CheckThat! was the most popular lab at CLEF-2022 in terms of team registrations: 137 teams. More than one-third (37\%) of them actually participated: 18, 7, and 26 teams submitted 210, 37, and 126 official runs for tasks 1, 2, and 3, respectively.}, subject = {Desinformation}, language = {en} }