@inproceedings{AsadiiKolagarLiebeletal.,
  author = {Asadii, Shima and Kolagar, Zahra and Liebel, Alina and Zarcone, Alessandra},
  title = {GiCCS: A German in-Context Conversational Similarity Benchmark},
  booktitle = {Proceedings of the 2nd Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)},
  publisher = {Association for Computational Linguistics},
  isbn = {978-1-959429-12-8},
  pages = {351 -- 362},
  language = {en}
}

@article{FrommherzZarcone,
  author = {Frommherz, Yannick and Zarcone, Alessandra},
  title = {Crowdsourcing Ecologically-Valid Dialogue Data for German},
  journal = {Frontiers in Computer Science},
  volume = {3},
  year = {2021},
  issn = {2624-9898},
  doi = {10.3389/fcomp.2021.686050},
  language = {en}
}

@inproceedings{ZarconeDemberg,
  author = {Zarcone, Alessandra and Demberg, Vera},
  title = {A Bathtub by Any Other Name: the Reduction of German Compounds in Predictive Contexts},
  booktitle = {Proceedings of the Annual Meeting of the Cognitive Science Society},
  volume = {43},
  issn = {1069-7977},
  pages = {749 -- 755},
  language = {en}
}

@article{ZarconeDembergDiscourse,
  author = {Zarcone, Alessandra and Demberg, Vera},
  title = {Interaction of Script Knowledge and Temporal Discourse Cues in a Visual World Study},
  journal = {Discourse Processes},
  volume = {58},
  number = {9},
  year = {2021},
  issn = {1532-6950},
  doi = {10.1080/0163853X.2021.1930807},
  pages = {804 -- 819},
  language = {en}
}

@inproceedings{AlamZarconePado,
  author = {Alam, Touhidul and Zarcone, Alessandra and Pad{\'o}, Sebastian},
  title = {New Domain, Major Effort? How Much Data is Necessary to Adapt a Temporal Tagger to the Voice Assistant Domain},
  booktitle = {Proceedings of the 14th International Conference on Computational Semantics},
  publisher = {Association for Computational Linguistics},
  pages = {144 -- 154},
  language = {en}
}

@inproceedings{ZarconeLehmannHabets,
  author = {Zarcone, Alessandra and Lehmann, Jens and Habets, Emanu{\"e}l A. P.},
  title = {Small Data in NLU: Proposals towards a Data-Centric Approach},
  booktitle = {NeurIPS Data-Centric AI Workshop},
  isbn = {9781713845393},
  pages = {6},
  language = {en}
}

@inproceedings{HrycykZarconeHahn,
  author = {Hrycyk, Lianna and Zarcone, Alessandra and Hahn, Luzian},
  title = {Not So Fast, Classifier - Accuracy and Entropy Reduction in Incremental Intent Classification},
  booktitle = {Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI},
  publisher = {Association for Computational Linguistics},
  doi = {10.18653/v1/2021.nlp4convai-1.6},
  year = {2021},
  pages = {52 -- 67},
  language = {en}
}

@inproceedings{ZarconeAlamKolagar,
  author = {Zarcone, Alessandra and Alam, Touhidul and Kolagar, Zahra},
  title = {P{\^A}T{\'E}: A Corpus of Temporal Expressions for the In-car Voice Assistant Domain},
  booktitle = {Proceedings of the Twelfth Language Resources and Evaluation Conference},
  publisher = {European Language Resources Association},
  pages = {523 -- 530},
  language = {en}
}

@incollection{ZarconeLeschanowsky,
  author = {Zarcone, Alessandra and Leschanowsky, Anna},
  title = {Moderne Sprachassistenten zwischen Datenhunger und Datenschutz},
  booktitle = {K{\"u}nstliche Intelligenz und menschliche Gesellschaft},
  editor = {Kov{\'a}cs, L{\'a}szl{\'o}},
  publisher = {De Gruyter Oldenbourg},
  address = {Berlin; M{\"u}nchen; Boston},
  isbn = {978-3-11-103470-6},
  doi = {10.1515/9783111034706-012},
  pages = {167 -- 180},
  language = {de}
}

@inproceedings{KolagarSteindlZarcone,
  author = {Kolagar, Zahra and Steindl, Sebastian and Zarcone, Alessandra},
  title = {EduQuick: A Dataset Toward Evaluating Summarization of Informal Educational Content for Social Media},
  booktitle = {The 4th Workshop on Evaluation and Comparison of NLP Systems (Eval4NLP 2023): Proceedings of the Workshop},
  editor = {Deutsch, Daniel and Dror, Rotem and Eger, Steffen and Gao, Yang and Leiter, Christoph and Opitz, Juri and R{\"u}ckl{\'e}, Andreas},
  isbn = {979-8-89176-021-9},
  year = {2023},
  pages = {32 -- 48},
  abstract = {This study explores the capacity of large language models (LLMs) to efficiently generate summaries of informal educational content tailored for platforms like TikTok. It also investigates how both humans and LLMs assess the quality of these summaries, based on a series of experiments, exploring the potential replacement of human evaluation with LLMs. Furthermore, the study delves into how experienced content creators perceive the utility of automatic summaries for TikTok videos. We employ strategic prompt selection techniques to guide LLMs in producing engaging summaries based on the characteristics of viral TikTok content, including hashtags, captivating hooks, storytelling, and user engagement. The study leverages OpenAI's GPT-4 model to generate TikTok content summaries, aiming to align them with the essential features identified. By employing this model and incorporating human evaluation and expert assessment, this research endeavors to shed light on the intricate dynamics of modern content creation, where AI and human ingenuity converge.
    Ultimately, it seeks to enhance strategies for disseminating and evaluating educational information effectively in the realm of social media.},
  language = {en}
}

@inproceedings{ZarconeKopf,
  author = {Zarcone, Alessandra and Kopf, Fabian},
  title = {Bubble up - A Fine-tuning Approach for Style Transfer to Community-specific Subreddit Language},
  booktitle = {Proceedings of the 3rd Workshop on Computational Linguistics for the Political and Social Sciences (CPSS 2023)},
  editor = {Klamm, Christopher and Lapesa, Gabriella and Gold, Valentin and Gessler, Theresa and Ponzetto, Simone Paolo},
  isbn = {979-8-89176-032-5},
  year = {2023},
  pages = {46 -- 58},
  abstract = {Different online communities (social media bubbles) can be identified with their use of language. We looked at different social media bubbles and explored the task of translating between the language of one bubble into another while maintaining the intended meaning. We collected a dataset of Reddit comments from 20 different Subreddits and for a smaller subset of them we obtained style-neutral versions generated by a large language model. Then we used the dataset to fine-tune different (smaller) language models to learn style transfers between social media bubbles. We evaluated the models on unseen data from four unseen social media bubbles to assess to what extent they had learned the style transfer task and compared their performance with the zero-shot performance of a larger, non-fine tuned, language model. We show that with a small amount of fine-tuning the smaller models achieve satisfactory performance, making them more attractive than a larger, more resource-intensive model.},
  language = {en}
}

@inproceedings{KumarZarcone,
  author = {Kumar, Saurabh and Zarcone, Alessandra},
  title = {Including a Contemporary NLP Application within an Introductory Course: an Example with Student Feedback from a University of Applied Sciences},
  booktitle = {Proceedings of the 1st Workshop on Teaching for NLP (Teach4NLP 2023)},
  editor = {Friedrich, Annemarie and Gr{\"u}newald, Stefan and Mieskes, Margot and Str{\"o}tgen, Jannik and Wartena, Christian},
  isbn = {979-8-89176-030-1},
  year = {2023},
  pages = {1 -- 7},
  language = {en}
}

@inproceedings{KolagarZarcone,
  author = {Kolagar, Zahra and Zarcone, Alessandra},
  title = {Aligning Uncertainty: Leveraging LLMs to Analyze Uncertainty Transfer in Text Summarization},
  booktitle = {Proceedings of the 1st Workshop on Uncertainty-Aware NLP (UncertaiNLP 2024)},
  isbn = {979-8-89176-072-1},
  year = {2024},
  pages = {41 -- 61},
  language = {en}
}

@inproceedings{KolagarZarconeHumSum,
  author = {Kolagar, Zahra and Zarcone, Alessandra},
  title = {HumSum: A Personalized Lecture Summarization Tool for Humanities Students Using LLMs},
  booktitle = {Proceedings of the 1st Workshop on Personalization of Generative AI Systems (PERSONALIZE 2024)},
  isbn = {979-8-89176-072-1},
  year = {2024},
  pages = {36 -- 70},
  language = {en}
}