@inproceedings{SchullerBatlinerAmiriparianetal.2022, author = {Schuller, Bj{\"o}rn and Batliner, Anton and Amiriparian, Shahin and Bergler, Christian and Gerczuk, Maurice and Holz, Natalie and Larrouy-Maestri, Pauline and Bayerl, Sebastian P. and Riedhammer, Korbinian and Mallol-Ragolta, Adria and Pateraki, Maria and Coppock, Harry and Kiskin, Ivan and Sinka, Marianne and Roberts, Stephen}, title = {The ACM Multimedia 2022 Computational Paralinguistics Challenge}, doi = {10.1145/3503161.3551591}, pages = {7120--7124}, year = {2022}, abstract = {The ACM Multimedia 2022 Computational Paralinguistics Challenge addresses four different problems for the first time in a research competition under well-defined conditions: In the Vocalisations and Stuttering Sub-Challenges, a classification on human non-verbal vocalisations and speech has to be made; the Activity Sub-Challenge aims at beyond-audio human activity recognition from smartwatch sensor data; and in the Mosquitoes Sub-Challenge, mosquitoes need to be detected. We describe the Sub-Challenges, baseline feature extraction, and classifiers based on the 'usual' ComParE and BoAW features, the auDeep toolkit, and deep feature extraction from pre-trained CNNs using the DeepSpectrum toolkit; in addition, we add end-to-end sequential modelling and a log-mel-128-BNN.}, language = {en} } @inproceedings{BayerlWenningerSchmidtetal.2021, author = {Bayerl, Sebastian P. and Wenninger, Marc and Schmidt, Jochen and Wolff von Gudenberg, Alexander and Riedhammer, Korbinian}, title = {STAN: A stuttering therapy analysis helper}, doi = {10.48550/arXiv.2106.09545}, pages = {2}, year = {2021}, abstract = {Stuttering is a complex speech disorder identified by repetitions, prolongations of sounds, syllables or words, and blocks while speaking. Specific stuttering behaviour differs strongly, thus needing personalized therapy. Therapy sessions require a high level of concentration by the therapist. We introduce STAN, a system to aid speech therapists in stuttering therapy sessions. Such an automated feedback system can lower the cognitive load on the therapist and thereby enable a more consistent therapy as well as allowing analysis of stuttering over the span of multiple therapy sessions.}, language = {en} } @techreport{TrumpAgcharBaumannetal.2021, author = {Trump, Sebastian and Agchar, Ismael and Baumann, Ilja and Braun, Franziska and Riedhammer, Korbinian and Siemandel, Lea and Ullrich, Martin}, title = {Spirio Sessions}, pages = {7}, year = {2021}, abstract = {This paper presents an ongoing interdisciplinary research project that deals with free improvisation and human-machine interaction, involving a digital player piano and other musical instruments. Various technical concepts are developed by student participants in the project and continuously evaluated in artistic performances. Our goal is to explore methods for co-creative collaborations with artificial intelligences embodied in the player piano, enabling it to act as an equal improvisation partner for human musicians.}, language = {en} } @inproceedings{WagnerBaumannBayerletal.2023, author = {Wagner, Dominik and Baumann, Ilja and Bayerl, Sebastian P.
and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Speaker Adaptation for End-To-End Speech Recognition Systems in Noisy Environments}, doi = {10.48550/arXiv.2211.08774}, pages = {6}, year = {2023}, abstract = {We analyze the impact of speaker adaptation in end-to-end automatic speech recognition models based on transformers and wav2vec 2.0 under different noise conditions. By including speaker embeddings obtained from x-vector and ECAPA-TDNN systems, as well as i-vectors, we achieve relative word error rate improvements of up to 16.3\% on LibriSpeech and up to 14.5\% on Switchboard. We show that the proven method of concatenating speaker vectors to the acoustic features and supplying them as auxiliary model inputs remains a viable option to increase the robustness of end-to-end architectures. The effect on transformer models is stronger when more noise is added to the input speech. The most substantial benefits for systems based on wav2vec 2.0 are achieved under moderate or no noise conditions. Both x-vectors and ECAPA-TDNN embeddings outperform i-vectors as speaker representations. The optimal embedding size depends on the dataset and also varies with the noise condition.}, language = {en} } @inproceedings{BayerlBrasserBuschetal.2019, author = {Bayerl, Sebastian P. and Brasser, Ferdinand and Busch, Christoph and Frassetto, Tommaso and Jauernig, Patrick and Kolberg, Jascha and Nautsch, Andreas and Riedhammer, Korbinian and Sadeghi, Ahmad-Reza and Schneider, Thomas and Stapf, Emmanuel and Treiber, Amos and Weinert, Christian}, title = {Privacy-preserving speech processing via STPC and TEEs (Poster)}, year = {2019}, language = {en} } @inproceedings{WagnerBaumannRanzenbergeretal.2024, author = {Wagner, Dominik and Baumann, Ilja and Ranzenberger, Thomas and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Personalizing Large Sequence-to-Sequence Speech Foundation Models With Speaker Representations}, doi = {10.1109/SLT61566.2024.10832252}, pages = {1--6}, year = {2024}, abstract = {We present a method to personalize large transformer-based encoder-decoder speech foundation models without the need for changes in the underlying model structure or training from scratch. This is achieved by projecting speaker-specific information into the latent space of the transformer decoder via a small neural network and learning to process the speaker information along with domain-specific information via parameter-efficient fine-tuning. We use this method to improve the automatic speech recognition results of spoken academic German and English. Our approach yields average relative word error rate (WER) improvements of approximately 29\% on German academic speech and 25\% on English academic speech. It also translates well to conversational speech, achieving relative WER improvements of up to 36\%, and demonstrates modest gains of up to 5\% on read speech.
Moreover, we observe that incorporating utterances from the recent past as personalization context yields the most significant overall improvements and that changes in voice characteristics resulting from prolonged speaking have a minimal effect on the personalization quality of academic lectures.}, language = {en} } @inproceedings{WagnerBaumannEngertetal.2025, author = {Wagner, Dominik and Baumann, Ilja and Engert, Natalie and Lee, Seanie and N{\"o}th, Elmar and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Personalized Fine-Tuning with Controllable Synthetic Speech from LLM-Generated Transcripts for Dysarthric Speech Recognition}, series = {Interspeech 2025}, booktitle = {Interspeech 2025}, publisher = {ISCA}, address = {ISCA}, issn = {2958-1796}, doi = {10.21437/Interspeech.2025-2155}, pages = {3294--3298}, year = {2025}, abstract = {In this work, we present our submission to the Speech Accessibility Project challenge for dysarthric speech recognition. We integrate parameter-efficient fine-tuning with latent audio representations to improve an encoder-decoder ASR system. Synthetic training data is generated by fine-tuning Parler-TTS to mimic dysarthric speech, using LLM-generated prompts for corpus-consistent target transcripts. Personalization with x-vectors consistently reduces word error rates (WERs) over non-personalized fine-tuning. AdaLoRA adapters outperform full fine-tuning and standard low-rank adaptation, achieving relative WER reductions of ∼23\% and ∼22\%, respectively. Further improvements (∼5\% WER reduction) come from incorporating wav2vec 2.0-based audio representations. Training with synthetic dysarthric speech yields up to ∼7\% relative WER improvement over personalized fine-tuning alone.}, language = {en} } @inproceedings{BaumannWagnerRiedhammeretal.2025, author = {Baumann, Ilja and Wagner, Dominik and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Pathology-Aware Speech Encoding and Data Augmentation for Dysarthric Speech Recognition}, series = {Interspeech 2025}, booktitle = {Interspeech 2025}, publisher = {ISCA}, address = {ISCA}, issn = {2958-1796}, doi = {10.21437/Interspeech.2025-2724}, pages = {3289--3293}, year = {2025}, abstract = {Automatic speech recognition (ASR) for pathologic speech remains a major challenge due to high variability in articulation, phonation, and prosody distortions. In this work, we propose a pathology-aware speech encoder based on BEST-RQ pre-training, which incorporates 46k hours of speech, including pathologic and atypical speech. We continue pre-training for domain adaptation and experiment with etiology-specific codebooks. We achieve a 13.2\% relative word error rate (WER) improvement using the pathology-aware speech encoder with etiology-specific continued pre-training. Additionally, we examine the impact of incorporating synthetic and out-of-domain (OOD) data to further enhance ASR performance. Synthetic data reduces WER by up to 8.7\%, while OOD data improves WER by 12.2\%.
Finally, we introduce a semantic similarity-based data augmentation technique to optimize data selection, achieving a WER improvement of up to 9.7\% while minimizing the need for additional training data.}, language = {en} } @inproceedings{WagnerBaumannRiedhammeretal.2024, author = {Wagner, Dominik and Baumann, Ilja and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Outlier Reduction with Gated Attention for Improved Post-training Quantization in Large Sequence-to-sequence Speech Foundation Models}, editor = {Pesak, Krisztina}, doi = {10.21437/Interspeech.2024-2105}, pages = {4623--4627}, year = {2024}, abstract = {This paper explores the improvement of post-training quantization (PTQ) after knowledge distillation in the Whisper speech foundation model family. We address the challenge of outliers in weights and activation tensors, known to impede quantization quality in transformer-based language and vision models. Extending this observation to Whisper, we demonstrate that these outliers are also present when transformer-based models are trained to perform automatic speech recognition, necessitating mitigation strategies for PTQ. We show that outliers can be reduced by a recently proposed gating mechanism in the attention blocks of the student model, enabling effective 8-bit quantization and lower word error rates compared to student models without the gating mechanism in place.}, language = {en} } @inproceedings{WagnerLeeBaumannetal.2024, author = {Wagner, Dominik and Lee, Seanie and Baumann, Ilja and Seeberger, Philipp and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Optimized Speculative Sampling for {GPU} Hardware Accelerators}, series = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing}, booktitle = {Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing}, publisher = {Association for Computational Linguistics}, address = {Miami, Florida, USA}, doi = {10.18653/v1/2024.emnlp-main.370}, pages = {6442--6458}, year = {2024}, abstract = {In this work, we optimize speculative sampling for parallel hardware accelerators to improve sampling speed. We notice that substantial portions of the intermediate matrices necessary for speculative sampling can be computed concurrently. This allows us to distribute the workload across multiple GPU threads, enabling simultaneous operations on matrix segments within thread blocks. This results in profiling time improvements ranging from 6\% to 13\% relative to the baseline implementation, without compromising accuracy. To further accelerate speculative sampling, probability distributions parameterized by softmax are approximated by sigmoid. This approximation approach results in significantly greater relative improvements in profiling time, ranging from 37\% to 94\%, with a minor decline in accuracy.
We conduct extensive experiments on both automatic speech recognition and summarization tasks to validate the effectiveness of our optimization methods.}, language = {en} } @inproceedings{BaumannWagnerRiedhammeretal.2025a, author = {Baumann, Ilja and Wagner, Dominik and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Optimized Self-supervised Training with BEST-RQ for Speech Recognition}, series = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, publisher = {IEEE}, doi = {10.1109/ICASSP49660.2025.10889362}, pages = {1--5}, year = {2025}, abstract = {Self-supervised learning has been successfully used for various speech-related tasks, including automatic speech recognition. BERT-based Speech pre-Training with Random-projection Quantizer (BEST-RQ) has achieved state-of-the-art results in speech recognition. In this work, we further optimize the BEST-RQ approach using Kullback-Leibler divergence as an additional regularizing loss and a multi-codebook extension per cluster derived from low-level feature clustering. Preliminary experiments on the train-100 split of LibriSpeech result in a relative improvement of 11.2\% on test-clean by using multiple codebooks; utilizing a combination of cross-entropy and Kullback-Leibler divergence further reduces the word error rate by 4.5\%. The proposed optimizations on full LibriSpeech pre-training and fine-tuning result in relative word error rate improvements of up to 23.8\% on test-clean and 30.6\% on test-other using 6 codebooks. Furthermore, the proposed setup leads to faster convergence in pre-training and fine-tuning and additionally stabilizes the pre-training.}, language = {en} } @inproceedings{BayerlFrassettoJauernigetal.2020, author = {Bayerl, Sebastian P. and Frassetto, Tommaso and Jauernig, Patrick and Riedhammer, Korbinian and Sadeghi, Ahmad-Reza and Schneider, Thomas and Stapf, Emmanuel and Weinert, Christian}, title = {Offline Model Guard: Secure and Private ML on Mobile Devices}, series = {2020 Design, Automation \& Test in Europe Conference \& Exhibition (DATE)}, booktitle = {2020 Design, Automation \& Test in Europe Conference \& Exhibition (DATE)}, publisher = {IEEE}, doi = {10.23919/DATE48585.2020.9116560}, pages = {6}, year = {2020}, abstract = {Performing machine learning tasks in mobile applications yields a challenging conflict of interest: highly sensitive client information (e.g., speech data) should remain private while also the intellectual property of service providers (e.g., model parameters) must be protected. Cryptographic techniques offer secure solutions for this, but have an unacceptable overhead and moreover require frequent network interaction. In this work, we design a practically efficient hardware-based solution. Specifically, we build OFFLINE MODEL GUARD (OMG) to enable privacy-preserving machine learning on the predominant mobile computing platform ARM, even in offline scenarios. By leveraging a trusted execution environment for strict hardware-enforced isolation from other system components, OMG guarantees privacy of client data, secrecy of provided models, and integrity of processing algorithms.
Our prototype implementation on an ARM HiKey 960 development board performs privacy-preserving keyword recognition using TensorFlow Lite for Microcontrollers in real time.}, language = {en} } @inproceedings{SeebergerWagnerRiedhammer2024, author = {Seeberger, Philipp and Wagner, Dominik and Riedhammer, Korbinian}, title = {Multimodal Multimedia Event Argument Extraction with Unified Template Filling}, series = {Findings of the Association for Computational Linguistics: EMNLP 2024}, booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2024}, publisher = {Association for Computational Linguistics}, address = {Miami, Florida, USA}, doi = {10.18653/v1/2024.findings-emnlp.381}, pages = {6539--6548}, year = {2024}, abstract = {With the advancement of multimedia technologies, news documents and user-generated content are often represented as multiple modalities, making Multimedia Event Extraction (MEE) an increasingly important challenge. However, recent MEE methods employ weak alignment strategies and data augmentation with simple classification models, which ignore the capabilities of natural language-formulated event templates for the challenging Event Argument Extraction (EAE) task. In this work, we focus on EAE and address this issue by introducing a unified template filling model that connects the textual and visual modalities via textual prompts. This approach enables the exploitation of cross-ontology transfer and the incorporation of event-specific semantics. Experiments on the M2E2 benchmark demonstrate the effectiveness of our approach. Our system surpasses the current SOTA on textual EAE by +7\% F1, and performs generally better than the second-best systems for multimedia EAE.}, language = {en} } @inproceedings{SeebergerRiedhammer2024, author = {Seeberger, Philipp and Riedhammer, Korbinian}, title = {Multi-Query Focused Disaster Summarization via Instruction-Based Prompting}, publisher = {National Institute of Standards and Technology (NIST)}, doi = {10.48550/arXiv.2402.09008}, pages = {7}, year = {2024}, abstract = {Automatic summarization of mass-emergency events plays a critical role in disaster management. The second edition of CrisisFACTS aims to advance disaster summarization based on multi-stream fact-finding with a focus on web sources such as Twitter, Reddit, Facebook, and Webnews. Here, participants are asked to develop systems that can extract key facts from several disaster-related events, which ultimately serve as a summary. This paper describes our method to tackle this challenging task. We follow previous work and propose to use a combination of retrieval, reranking, and an embarrassingly simple instruction-following summarization. The two-stage retrieval pipeline relies on BM25 and MonoT5, while the summarizer module is based on the open-source Large Language Model (LLM) LLaMA-13b. For summarization, we explore a Question Answering (QA)-motivated prompting approach and find the evidence useful for extracting query-relevant facts. The automatic metrics and human evaluation show strong results but also highlight the gap between open-source and proprietary systems.}, language = {en} } @inproceedings{WagnerBaumannBraunetal.2023, author = {Wagner, Dominik and Baumann, Ilja and Braun, Franziska and Bayerl, Sebastian P.
and N{\"o}th, Elmar and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Multi-class Detection of Pathological Speech with Latent Features}, issn = {2958-1796}, doi = {10.21437/Interspeech.2023-464}, pages = {2318 -- 2322}, year = {2023}, abstract = {The detection of pathologies from speech features is usually defined as a binary classification task with one class representing a specific pathology and the other class representing healthy speech. In this work, we train neural networks, large margin classifiers, and tree boosting machines to distinguish between four pathologies: Parkinson's disease, laryngeal cancer, cleft lip and palate, and oral squamous cell carcinoma. We show that latent representations extracted at different layers of a pre-trained wav2vec 2.0 system can be effectively used to classify these types of pathological voices. We evaluate the robustness of our classifiers by adding room impulse responses to the test data and by applying them to unseen speech corpora. Our approach achieves unweighted average F1-Scores between 74.1\% and 97.0\%, depending on the model and the noise conditions used. The systems generalize and perform well on unseen data of healthy speakers sampled from a variety of different sources.}, language = {en} } @inproceedings{RiedhammerBaumannBayerletal.2023, author = {Riedhammer, Korbinian and Baumann, Ilja and Bayerl, Sebastian P. and Bocklet, Tobias and Braun, Franziska and Wagner', Dominik}, title = {Medical Speech Processing for Diagnosis and Monitoring}, pages = {1417 -- 1420}, year = {2023}, abstract = {In recent years, speech processing for medical applications got significant traction. While pioneering work in the 1990ies focused on processing sustained vowels or isolated utterances, work in the 2000s already showed, that speech recognition systems, prosodic analysis and natural language processing be used to assess a large variety of speech pathologies.Here, we give an overview of how to classify selected speech pathologies including stuttering, language development, speech intelligibility after surgery, dementia and Alzheimers, depression and state-of-mind. While each of those poses a rather well-defined problem in a lab setting, we discuss the issues when integrating such methods in a clinical workflow such as diagnosis or monitoring. Starting from the question if such detectors can be used for general screening or rather as a specialist's tool, we explore the legal and privacy-related implications: patient-doctor conversations, working with children or demented seniors, bias towards examiner or patient, on-device vs. cloud processing.We conclude with a set of open questions that should be addressed to help bringing all this research from the lab to routine clinical use.}, language = {en} } @inproceedings{WagnerBayerlBaumannetal.2024, author = {Wagner, Dominik and Bayerl, Sebastian P. and Baumann, Ilja and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias}, title = {Large Language Models for Dysfluency Detection in Stuttered Speech}, doi = {10.48550/arXiv.2406.11025}, pages = {6}, year = {2024}, abstract = {Accurately detecting dysfluencies in spoken language can help to improve the performance of automatic speech and language processing components and support the development of more inclusive speech and language technologies. 
Inspired by the recent trend towards the deployment of large language models (LLMs) as universal learners and processors of non-lexical inputs, such as audio and video, we approach the task of multi-label dysfluency detection as a language modeling problem. We present hypothesis candidates generated with an automatic speech recognition system and acoustic representations extracted from an audio encoder model to an LLM, and fine-tune the system to predict dysfluency labels on three datasets containing English and German stuttered speech. The experimental results show that our system effectively combines acoustic and lexical information and achieves competitive results on the multi-label stuttering detection task.}, language = {en} } @article{BockletNoethRiedhammer2023, author = {Bocklet, Tobias and N{\"o}th, Elmar and Riedhammer, Korbinian}, title = {K{\"u}nstliche Intelligenz f{\"u}r die Analyse pathologischer Sprache}, series = {Sprache - Stimme - Geh{\"o}r}, volume = {47}, journal = {Sprache - Stimme - Geh{\"o}r}, number = {3}, doi = {10.1055/a-2089-5778}, pages = {145--150}, year = {2023}, abstract = {Speech can contain a wealth of diagnostically relevant information. This review article shows how methods of artificial intelligence, in particular machine learning and speech processing, can be applied to speech signals: to assess intelligibility, to automate standardized tests, and to determine medical scales and diagnoses. A concluding critical examination of acoustic features across a variety of pathologies gives reason to assume that these markers indeed contain diagnostically relevant information.}, language = {de} } @inproceedings{BayerlGudenbergHoenigetal.2022, author = {Bayerl, Sebastian P. and Wolff von Gudenberg, Alexander and H{\"o}nig, Florian and N{\"o}th, Elmar and Riedhammer, Korbinian}, title = {KSoF: The Kassel State of Fluency Dataset}, doi = {10.48550/arXiv.2203.05383}, pages = {8}, year = {2022}, abstract = {Stuttering is a complex speech disorder that negatively affects an individual's ability to communicate effectively. Persons who stutter (PWS) often suffer considerably under the condition and seek help through therapy. Fluency shaping is a therapy approach where PWSs learn to modify their speech to help them to overcome their stutter. Mastering such speech techniques takes time and practice, even after therapy. Shortly after therapy, success is evaluated highly, but relapse rates are high. To be able to monitor speech behavior over a long time, the ability to detect stuttering events and modifications in speech could help PWSs and speech pathologists to track the level of fluency. Monitoring could create the ability to intervene early by detecting lapses in fluency. To the best of our knowledge, no public dataset is available that contains speech from people who underwent stuttering therapy that changed the style of speaking. This work introduces the Kassel State of Fluency (KSoF), a therapy-based dataset containing over 5500 clips of PWSs. The clips were labeled with six stuttering-related event types: blocks, prolongations, sound repetitions, word repetitions, interjections, and - specific to therapy - speech modifications. The audio was recorded during therapy sessions at the Institut der Kasseler Stottertherapie.
The data will be made available for research purposes upon request.}, language = {en} } @article{EscobarGrisalesRiosUrregoBaumannetal.2024, author = {Escobar-Grisales, Daniel and R{\'i}os-Urrego, Cristian-David and Baumann, Ilja and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias and Garcia, Adolfo and Orozco-Arroyave, Juan Rafael}, title = {It's Time to Take Action: Acoustic Modeling of Motor Verbs to Detect Parkinson's Disease}, doi = {10.21437/Interspeech.2024-2205}, year = {2024}, abstract = {Pre-trained models generate speech representations that are used in different tasks, including the automatic detection of Parkinson's disease (PD). Although these models can yield high accuracy, their interpretation is still challenging. This paper used a pre-trained wav2vec 2.0 model to represent speech frames of 25ms length and perform a frame-by-frame discrimination between PD patients and healthy control (HC) subjects. This fine-grained prediction enabled us to identify specific linguistic segments with high discrimination capability. Speech representations of all produced verbs were compared with those of nouns, and the former yielded higher accuracies. To gain a deeper understanding of this pattern, representations of motor and non-motor verbs were compared, and the former yielded better results, with accuracies of around 83\% in an independent test set. These findings support well-established neurocognitive models about action-related language, highlighted as key drivers of PD.}, language = {en} } @inproceedings{BraunBayerlHoenigetal.2024, author = {Braun, Franziska and Bayerl, Sebastian P. and H{\"o}nig, Florian and Lehfeld, Hartmut and Hillemacher, Thomas and Bocklet, Tobias and Riedhammer, Korbinian}, title = {Infusing Acoustic Pause Context into Text-Based Dementia Assessment}, issn = {2958-1796}, doi = {10.21437/Interspeech.2024-2496}, pages = {1980--1984}, year = {2024}, abstract = {Speech pauses, alongside content and structure, offer a valuable and non-invasive biomarker for detecting dementia. This work investigates the use of pause-enriched transcripts in transformer-based language models to differentiate the cognitive states of subjects with no cognitive impairment, mild cognitive impairment, and Alzheimer's dementia based on their speech from a clinical assessment. We address three binary classification tasks: onset, monitoring, and dementia exclusion. The performance is evaluated through experiments on a German Verbal Fluency Test and a Picture Description Test, comparing the model's effectiveness across different speech production contexts. Starting from a textual baseline, we investigate the effect of incorporating pause information and acoustic context. We show that the test should be chosen depending on the task and, similarly, that lexical pause information and acoustic cross-attention contribute differently.}, language = {en} } @inproceedings{SeebergerBockletRiedhammer2023, author = {Seeberger, Philipp and Bocklet, Tobias and Riedhammer, Korbinian}, title = {Information Type Classification with Contrastive Task-Specialized Sentence Encoders}, publisher = {Association for Computational Linguistics}, doi = {10.48550/arXiv.2312.11020}, pages = {180--186}, year = {2023}, abstract = {User-generated information content has become an important information source in crisis situations.
However, classification models suffer from noise and event-related biases, which still pose a challenging task and require sophisticated task adaptation. To address these challenges, we propose the use of contrastive task-specialized sentence encoders for downstream classification. We apply the task specialization to the CrisisLex, HumAID, and TrecIS information type classification tasks and show performance gains w.r.t. F1-score. Furthermore, we analyse the cross-corpus and cross-lingual capabilities for two German event relevancy classification datasets.}, language = {en} } @inproceedings{PerezToroBayerlAriasVergaraetal.2021, author = {P{\'e}rez-Toro, Paula A. and Bayerl, Sebastian P. and Arias-Vergara, Tomas and Vasquez-Correa, Juan Camillo and Klumpp, Philipp and Schuster, Maria and N{\"o}th, Elmar and Orozco-Arroyave, Juan R. and Riedhammer, Korbinian}, title = {Influence of the Interviewer on the Automatic Assessment of Alzheimer's Disease in the Context of the ADReSSo Challenge}, issn = {2958-1796}, doi = {10.21437/Interspeech.2021-1589}, pages = {3785--3789}, year = {2021}, abstract = {Alzheimer's Disease (AD) results from the progressive loss of neurons in the hippocampus, which affects the capability to produce coherent language. It affects lexical, grammatical, and semantic processes as well as speech fluency. This paper considers the analyses of speech and language for the assessment of AD in the context of the Alzheimer's Dementia Recognition through Spontaneous Speech (ADReSSo) 2021 challenge. We propose to extract acoustic features such as x-vectors, prosody, and emotional embeddings as well as linguistic features such as perplexity and word embeddings. The data consist of speech recordings from AD patients and healthy controls. The transcriptions are obtained using a commercial automatic speech recognition system. We outperform baseline results on the test set, both for the classification and the Mini-Mental State Examination (MMSE) prediction. We achieved a classification accuracy of 80\% and an RMSE of 4.56 in the regression. Additionally, we found strong evidence for the influence of the interviewer on classification results. In cross-validation on the training set, we get classification results of 85\% accuracy using the combined speech of the interviewer and the participant. Using interviewer speech only, we still get an accuracy of 78\%. Thus, we provide strong evidence for interviewer influence on classification results.}, language = {en} } @inproceedings{HintzBayerlSinhaetal.2023, author = {Hintz, Jan and Bayerl, Sebastian P. and Sinha, Yamini and Riedhammer, Korbinian and Siegert, Ingo}, title = {Impact of pathological speech on speaker anonymization}, pages = {4}, year = {2023}, abstract = {With the ever-increasing usage of voice assistants, concerns for privacy and data security arise. Speech contains highly personal data that can be exploited for user profiling or identification [1]. On-device speech anonymization can serve as a measure to counteract this [2]. While these anonymization systems are being tested and evaluated through challenges and benchmarks [3], the commonly used datasets include no or only a few individuals with speech impairments, leading to low inclusivity, possible data bias, and privacy concerns for these groups [5]. For anonymization to work, it is crucial to evaluate and counteract bias if needed. Stuttering is a speech disorder with diverse characteristics.
The well-known, defining symptoms are blocks, repetition and prolongation of sounds, syllables, and words while speaking [4]. The different primary stuttering symptoms vary strongly in their characteristics and occur over a different time context, making stuttering an ideal candidate to study the effects of pathological speech on the application of anonymization techniques. This paper analyzes the impact of stuttering on speaker anonymization regarding the level of anonymity and utility. We present two methods to conceal speaker identity, using voice conversion and re-synthesis. The first is voice conversion, a process that adapts the way a source speaker speaks to that of a target speaker. It preserves some prosody of the source speaker, especially temporal aspects, with the goal of protecting the identity while at the same time preserving pathologic speech patterns. This could be applied in pathology-related processing, such as self-help training applications. The second is re-synthesis, based on an automatic speech recognition system generating a transcript, which is afterward used to synthesize a new voice by a text-to-speech system. This process disentangles speaker information and text, granting a high level of anonymization. To compare these methods, we use subjective and objective measures.}, language = {en} } @incollection{BraunErzigkeitLehfeldetal.2022, author = {Braun, Franziska and Erzigkeit, Andreas and Lehfeld, Hartmut and Hillemacher, Thomas and Riedhammer, Korbinian and Bayerl, Sebastian P.}, title = {Going Beyond the Cookie Theft Picture Test: Detecting Cognitive Impairments Using Acoustic Features}, series = {Text, Speech, and Dialogue}, booktitle = {Text, Speech, and Dialogue}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {9783031162695}, issn = {0302-9743}, doi = {10.1007/978-3-031-16270-1_36}, pages = {437--448}, year = {2022}, language = {en} } @inproceedings{RanzenbergerBockletFreisingeretal.2024, author = {Ranzenberger, Thomas and Bocklet, Tobias and Freisinger, Steffen and Georges, Munir and Glockner, Kevin and Herygers, Aaricia and Riedhammer, Korbinian and Schneider, Fabian and Simic, Christopher and Zakaria, Khabbab}, title = {Extending HAnS: Large Language Models for Question Answering, Summarization, and Topic Segmentation in an ML-based Learning Experience Platform}, series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024}, booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024}, publisher = {TUPress}, address = {Dresden}, isbn = {978-3-95908-325-6}, pages = {219--224}, year = {2024}, abstract = {The use of chatbots based on large language models (LLMs) and their impact on society are influencing our learning experience platform Hochschul Assistenz-System (HAnS). HAnS uses machine learning (ML) methods to support students and lecturers in the online learning and teaching processes [1]. This paper introduces LLM-based features available in HAnS which use the transcript of our improved Automatic Speech Recognition (ASR) pipeline with an average transcription duration of 45 seconds and an average word error rate (WER) of 6.66\% on over 8 hours of audio data from 7 lecture videos. An LLM-based chatbot could be used to answer questions on the lecture content, as the ASR transcript is provided as context. The summarization and topic segmentation use the LLM to improve our learning experience platform.
We generate multiple-choice questions using the LLM and the ASR transcript as context during playback within a period of 3 minutes and display them in the HAnS frontend.}, language = {en} } @article{RanzenbergerBaumannBayerletal.2025, author = {Ranzenberger, Thomas and Baumann, Ilja and Bayerl, Sebastian P. and Wagner, Dominik and Bocklet, Tobias and Riedhammer, Korbinian}, title = {Evaluation of recognition errors of hybrid and transformer-based ASR systems in German video lectures}, series = {Studientexte zur Sprachkommunikation: Elektronische Sprachsignalverarbeitung 2025 - Book}, journal = {Studientexte zur Sprachkommunikation: Elektronische Sprachsignalverarbeitung 2025 - Book}, publisher = {ESSV 2025}, address = {Halle, Deutschland}, pages = {101--108}, year = {2025}, abstract = {We analyze different errors in speech recognition systems, focusing on consecutive insertions and deletions, known as hallucinations and elisions, in transformer-based end-to-end automatic speech recognition (ASR) systems. We compare errors from TDNN-HMM and Whisper-based models on English and German spontaneous speech. Based on a human-annotated subset of German lecture videos, we investigate whether these blocks of deletions affect the semantics of the utterance. Whisper performs best and preserves the meaning in 90\% of the annotated error segments on this subset, even those containing consecutive deletions. We analyze the word error rate and do further analysis of errors using natural language processing to detect lemmatization errors, compound word errors, and out-of-vocabulary words. We discuss possible reasons and mitigations.}, language = {en} } @inproceedings{SeebergerRiedhammer2022, author = {Seeberger, Philipp and Riedhammer, Korbinian}, title = {Enhancing Crisis-Related Tweet Classification with Entity-Masked Language Modeling and Multi-Task Learning}, publisher = {Association for Computational Linguistics}, doi = {10.48550/arXiv.2211.11468}, pages = {70--78}, year = {2022}, abstract = {Social media has become an important information source for crisis management and provides quick access to ongoing developments and critical information. However, classification models suffer from event-related biases and highly imbalanced label distributions, which still pose a challenging task. To address these challenges, we propose a combination of entity-masked language modeling and hierarchical multi-label classification as a multi-task learning problem. We evaluate our method on tweets from the TREC-IS dataset and show an absolute performance gain w.r.t. F1-score of up to 10\% for actionable information types. Moreover, we found that entity-masking reduces the effect of overfitting to in-domain events and enables improvements in cross-event generalization.}, language = {en} } @inproceedings{BaumannWagnerRiedhammeretal.2023, author = {Baumann, Ilja and Wagner, Dominik and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias}, title = {Detection of Vowel Errors in Children's Speech Using Synthetic Phonetic Transcripts}, series = {2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), Taipei, Taiwan, 2023}, booktitle = {2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), Taipei, Taiwan, 2023}, doi = {10.1109/ASRU57964.2023.10389704}, pages = {1--8}, year = {2023}, abstract = {The analysis of phonological processes is crucial in evaluating speech development disorders in children, but encounters challenges due to limited children's audio data.
This work focuses on automatic vowel error detection using a two-stage pipeline. The first stage uses a fine-tuned cross-lingual phone recognizer (wav2vec 2.0) to extract phone sequences from audio. The second stage employs a language model (BERT) for classification from a phone sequence; it is trained entirely on synthetic transcripts to counteract the very broad range of potential mistakes. We evaluate the system on nonword audio recordings recited by preschool children from a speech development test. The results show that the classifier trained on synthetic data performs well, but its efficacy relies on the quality of the phone recognizer. The best classifier achieves a 94.7\% F1 score when evaluated against phonetic ground truths, whereas the F1 score is 76.2\% when using automatically recognized phone sequences.}, language = {en} } @article{BayerlWagnerBaumannetal.2023, author = {Bayerl, Sebastian P. and Wagner, Dominik and Baumann, Ilja and Bocklet, Tobias and Riedhammer, Korbinian}, title = {Detecting Vocal Fatigue with Neural Embeddings}, series = {Journal of Voice}, journal = {Journal of Voice}, publisher = {Elsevier BV}, issn = {0892-1997}, doi = {10.1016/j.jvoice.2023.01.012}, pages = {11}, year = {2023}, abstract = {Vocal fatigue refers to the feeling of tiredness and weakness of voice due to extended utilization. This paper investigates the effectiveness of neural embeddings for the detection of vocal fatigue. We compare x-vectors, ECAPA-TDNN, and wav2vec 2.0 embeddings on a corpus of academic spoken English. Low-dimensional mappings of the data reveal that neural embeddings capture information about the change in vocal characteristics of a speaker during prolonged voice usage. We show that vocal fatigue can be reliably predicted using all three types of neural embeddings after 40 minutes of continuous speaking when temporal smoothing and normalization are applied to the extracted embeddings. We employ support vector machines for classification and achieve accuracy scores of 81\% using x-vectors, 85\% using ECAPA-TDNN embeddings, and 82\% using wav2vec 2.0 embeddings as input features. We obtain an accuracy score of 76\% when the trained system is applied to a different speaker and recording environment without any adaptation.}, language = {en} } @inproceedings{BayerlTammewarRiedhammeretal.2021, author = {Bayerl, Sebastian P. and Tammewar, Aniruddha and Riedhammer, Korbinian and Riccardi, Giuseppe}, title = {Detecting Emotion Carriers by Combining Acoustic and Lexical Representations}, doi = {10.48550/arXiv.2112.06603}, pages = {8}, year = {2021}, abstract = {Personal narratives (PN) - spoken or written - are recollections of facts, people, events, and thoughts from one's own experience. Emotion recognition and sentiment analysis tasks are usually defined at the utterance or document level. However, in this work, we focus on Emotion Carriers (EC), defined as the segments (speech or text) that best explain the emotional state of the narrator ("loss of father", "made me choose"). Once extracted, such EC can provide a richer representation of the user state to improve natural language understanding and dialogue modeling. In previous work, it has been shown that EC can be identified using lexical features. However, spoken narratives should provide a richer description of the context and the users' emotional state. In this paper, we leverage word-based acoustic and textual embeddings as well as early and late fusion techniques for the detection of ECs in spoken narratives.
For the acoustic word-level representations, we use Residual Neural Networks (ResNet) pretrained on separate speech emotion corpora and fine-tuned to detect EC. Experiments with different fusion and system combination strategies show that late fusion leads to significant improvements for this task.}, language = {en} } @inproceedings{BayerlWagnerNoethetal.2022, author = {Bayerl, Sebastian P. and Wagner, Dominik and N{\"o}th, Elmar and Riedhammer, Korbinian}, title = {Detecting Dysfluencies in Stuttering Therapy Using wav2vec 2.0}, series = {Interspeech 2022}, booktitle = {Interspeech 2022}, publisher = {ISCA}, address = {ISCA}, doi = {10.21437/Interspeech.2022-10908}, pages = {2868--2872}, year = {2022}, abstract = {Stuttering is a varied speech disorder that harms an individual's communication ability. Persons who stutter (PWS) often use speech therapy to cope with their condition. Improving speech recognition systems for people with such non-typical speech or tracking the effectiveness of speech therapy would require systems that can detect dysfluencies while at the same time being able to detect speech techniques acquired in therapy. This paper shows that fine-tuning wav2vec 2.0 [1] for the classification of stuttering on a sizeable English corpus containing stuttered speech, in conjunction with multi-task learning, boosts the effectiveness of the general-purpose wav2vec 2.0 features for detecting stuttering in speech, both within and across languages. We evaluate our method on FluencyBank [2] and the German therapy-centric Kassel State of Fluency (KSoF) [3] dataset by training Support Vector Machine classifiers using features extracted from the fine-tuned models for six different stuttering-related event types: blocks, prolongations, sound repetitions, word repetitions, interjections, and - specific to therapy - speech modifications. Using embeddings from the fine-tuned models leads to relative classification performance gains of up to 27\% w.r.t. F1-score.}, language = {en} } @inproceedings{SeebergerRiedhammer2022a, author = {Seeberger, Philipp and Riedhammer, Korbinian}, title = {Combining Deep Neural Reranking and Unsupervised Extraction for Multi-Query Focused Summarization}, publisher = {National Institute of Standards and Technology (NIST)}, doi = {10.48550/arXiv.2302.01148}, pages = {7}, year = {2022}, abstract = {The CrisisFACTS Track aims to tackle challenges such as multi-stream fact-finding in the domain of event tracking; participants' systems extract important facts from several disaster-related events while incorporating the temporal order. We propose a combination of retrieval, reranking, and the well-known Integer Linear Programming (ILP) and Maximal Marginal Relevance (MMR) frameworks. In the former two modules, we explore various methods including an entity-based baseline, pre-trained and fine-tuned Question Answering systems, and ColBERT. We then use the latter module as an extractive summarization component by taking diversity and novelty criteria into account. The automatic scoring runs show strong results across the evaluation setups but also reveal shortcomings and challenges.}, language = {en} } @inproceedings{BraunBayerlPerezToroetal.2023, author = {Braun, Franziska and Bayerl, Sebastian P. and P{\'e}rez-Toro, Paula A.
and H{\"o}nig, Florian and Lehfeld, Hartmut and Hillemacher, Thomas and N{\"o}th, Elmar and Bocklet, Tobias and Riedhammer, Korbinian}, title = {Classifying Dementia in the Presence of Depression}, doi = {10.48550/arXiv.2308.08306}, pages = {5}, year = {2023}, abstract = {Automated dementia screening enables early detection and intervention, reducing costs to healthcare systems and increasing quality of life for those affected. Depression has shared symptoms with dementia, adding complexity to diagnoses. The research focus so far has been on binary classification of dementia (DEM) and healthy controls (HC) using speech from picture description tests from a single dataset. In this work, we apply established baseline systems to discriminate cognitive impairment in speech from the semantic Verbal Fluency Test and the Boston Naming Test using text, audio and emotion embeddings in a 3-class classification problem (HC vs. MCI vs. DEM). We perform cross-corpus and mixed-corpus experiments on two independently recorded German datasets to investigate generalization to larger populations and different recording conditions. In a detailed error analysis, we look at depression as a secondary diagnosis to understand what our classifiers actually learn.}, language = {en} } @article{BayerlGerczukBatlineretal.2023, author = {Bayerl, Sebastian P. and Gerczuk, Maurice and Batliner, Anton and Bergler, Christian and Amiriparian, Shahin and Schuller, Bj{\"o}rn and N{\"o}th, Elmar and Riedhammer, Korbinian}, title = {Classification of Stuttering - The ComParE challenge and beyond}, series = {Computer Speech \& Language}, volume = {81}, journal = {Computer Speech \& Language}, doi = {10.1016/j.csl.2023.101519}, pages = {20}, year = {2023}, abstract = {The ACM Multimedia 2022 Computational Paralinguistics Challenge (ComParE) featured a sub-challenge on the classification of stuttering in order to bring attention to this important topic and engage a wider research community. Stuttering is a complex speech disorder characterized by blocks, prolongations of sounds and syllables, and repetitions of sounds and words. Accurately classifying the symptoms of stuttering has implications for the development of self-help tools and specialized automatic speech recognition systems (ASR) that can handle atypical speech patterns. This paper provides a review of the challenge contributions and improves upon them with new state-of-the-art classification results for the KSF-C dataset, and explores cross-language training to demonstrate the potential of datasets in multiple languages. To facilitate further research and reproducibility, the full KSF-C dataset, including test-set labels, is also released.}, language = {en} } @inproceedings{BaumannUngerWagneretal.2024, author = {Baumann, Ilja and Unger, Nicole and Wagner, Dominik and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Automatic Evaluation of a Sentence Memory Test for Preschool Children}, doi = {10.21437/Interspeech.2024-2125}, pages = {5158 -- 5162}, year = {2024}, abstract = {Assessment of memory capabilities in preschool-aged children is crucial for early detection of potential speech development impairments or delays. We present an approach for the automatic evaluation of a standardized sentence memory test specifically for preschool children. Our methodology leverages automatic transcription of recited sentences and evaluation based on natural language processing techniques. 
We demonstrate the effectiveness of our approach on a dataset comprising recited sentences from preschool-aged children, incorporating ratings of semantic and syntactic correctness. The best-performing systems achieve an F1 score of 91.7\% for semantic correctness and 86.1\% for syntactic correctness using automatic transcripts. Our results showcase the potential of automated evaluation systems in providing reliable and efficient assessments of memory capabilities in early childhood, facilitating timely interventions and support for children with language development needs.}, language = {en} } @inproceedings{BraunFoerstelOppermannetal.2022, author = {Braun, Franziska and F{\"o}rstel, Markus and Oppermann, Bastian and Erzigkeit, Andreas and Lehfeld, Hartmut and Hillemacher, Thomas and Riedhammer, Korbinian}, title = {Automated Evaluation of Standardized Dementia Screening Tests}, series = {Interspeech 2022}, booktitle = {Interspeech 2022}, publisher = {ISCA}, address = {ISCA}, doi = {10.21437/Interspeech.2022-10436}, pages = {2478--2482}, year = {2022}, abstract = {For dementia screening and monitoring, standardized tests play a key role in clinical routine since they aim at minimizing subjectivity by measuring performance on a variety of cognitive tasks. In this paper, we report a study consisting of a semi-standardized history taking followed by two standardized neuropsychological tests, namely the SKT and the CERAD-NB. The tests include basic tasks such as naming objects and learning word lists, but also widely used tools such as the MMSE. Most of the tasks are performed verbally and should thus be suitable for automated scoring based on transcripts. For the first batch of 30 patients, we analyze the correlation between expert manual evaluations and automatic evaluations based on manual and automatic transcriptions. For both SKT and CERAD-NB, we observe high to perfect correlations using manual transcripts; for certain tasks with lower correlation, the automatic scoring is stricter than the human reference since it is limited to the audio. Using automatic transcriptions, correlations drop as expected and are related to recognition accuracy; however, we still observe high correlations of up to 0.98 (SKT) and 0.85 (CERAD-NB). We show that using word alternatives helps to mitigate recognition errors and subsequently improves correlation with expert scores.}, language = {en} } @inproceedings{TammewarBraunRoccabrunaetal.2022, author = {Tammewar, Aniruddha and Braun, Franziska and Roccabruna, Gabriel and Bayerl, Sebastian P. and Riedhammer, Korbinian and Riccardi, Giuseppe}, title = {Annotation of Valence for Spoken Personal Narratives}, pages = {10}, year = {2022}, abstract = {Personal Narrative (PN) is the recollection of individuals' life experiences, events, and thoughts along with the associated emotions in the form of a story. Compared to other genres such as social media texts or microblogs, where people write about experienced events or products, spoken PNs are complex to analyze and understand. They are usually long and unstructured, involving multiple and related events, characters, as well as thoughts and emotions associated with events, objects, and persons. In spoken PNs, emotions are conveyed by changing the speech signal characteristics as well as the lexical content of the narrative. In this work, we annotate a corpus of spoken personal narratives with the emotion valence using discrete values.
The PNs are segmented into speech segments, and the annotators annotate them in the discourse context, with values on a 5-point bipolar scale ranging from -2 to +2 (0 for neutral). In this way, we capture the unfolding of the PNs' events and changes in the emotional state of the narrator. We perform an in-depth analysis of the inter-annotator agreement and the relation between the label distribution and the stimulus (positive/negative) used for the elicitation of the narrative, and compare the segment-level annotations to a baseline continuous annotation. We find that the neutral score plays an important role in the agreement. We observe that it is easy to differentiate the positive from the negative valence, while the confusion with the neutral label is high.}, language = {en} } @article{AgcharBaumannBraunetal.2024, author = {Agchar, Ismael and Baumann, Ilja and Braun, Franziska and P{\'e}rez-Toro, Paula Andrea and Riedhammer, Korbinian and Trump, Sebastian and Ullrich, Martin}, title = {A Survey of Music Generation in the Context of Interaction}, doi = {10.48550/arXiv.2402.15294}, pages = {47}, year = {2024}, abstract = {In recent years, machine learning, and in particular generative adversarial neural networks (GANs) and attention-based neural networks (transformers), have been successfully used to compose and generate music, both melodies and polyphonic pieces. Current research focuses foremost on style replication (e.g., generating a Bach-style chorale) or style transfer (e.g., classical to jazz) based on large amounts of recorded or transcribed music, which in turn also allows for fairly straightforward "performance" evaluation. However, most of these models are not suitable for human-machine co-creation through live interaction, nor is it clear how such models and resulting creations would be evaluated. This article presents a thorough review of music representation, feature analysis, heuristic algorithms, statistical and parametric modelling, and human and automatic evaluation measures, along with a discussion of which approaches and models seem most suitable for live interaction.}, language = {en} }
@inproceedings{RanzenbergerFreierReinoldetal.2024, author = {Ranzenberger, Thomas and Freier, Carolin and Reinold, Luca and Riedhammer, Korbinian and Schneider, Fabian and Simic, Christopher and Simon, Claudia and Freisinger, Steffen and Georges, Munir and Bocklet, Tobias}, title = {A Multidisciplinary Approach to AI-based self-motivated Learning and Teaching with Large Language Models}, series = {Proceedings of DELFI 2024}, booktitle = {Proceedings of DELFI 2024}, publisher = {Gesellschaft f{\"u}r Informatik e.V.}, address = {Bonn}, issn = {2944-7682}, doi = {10.18420/delfi2024_11}, pages = {133--140}, year = {2024}, abstract = {We present a learning experience platform that uses machine learning methods to support students and lecturers in self-motivated online learning and teaching processes. The platform is being developed as an agile open-source collaborative project supported by multiple universities and partners. The development is guided didactically, reviewed, and scientifically evaluated in several cycles. Transparency, data protection, and the copyright-compliant use of the system are a central part of the project. The system further employs large language models (LLMs). Due to privacy concerns, we utilize locally hosted LLM instances and explicitly do not rely on available cloud products. Students and lecturers can interact with an LLM-based chatbot in the current prototype. The AI-generated outputs contain cross-references to the current educational video's context, indicating whether sections are based on the lecture's context or on world knowledge. We present the prototype and results of our qualitative evaluation from the perspective of lecturers and students.}, language = {en} } @inproceedings{RiedhammerBayerl2019, author = {Riedhammer, Korbinian and Bayerl, Sebastian P.}, title = {A Comparison of Hybrid and End-to-End Models for Syllable Recognition}, doi = {10.1007/978-3-030-27947-9_30}, pages = {8}, year = {2019}, abstract = {This paper presents a comparison of a traditional hybrid speech recognition system (Kaldi using WFST and TDNN with lattice-free MMI) and a lexicon-free end-to-end model (TensorFlow implementation of a multi-layer LSTM with CTC training) for German syllable recognition on the Verbmobil corpus. The results show that explicitly modeling prior knowledge is still valuable in building recognition systems. With a strong language model (LM) based on syllables, the structured approach significantly outperforms the end-to-end model. The best word error rate (WER) regarding syllables was achieved using Kaldi with a 4-gram LM, modeling all syllables observed in the training set. It achieved 10.0\% WER w.r.t. the syllables, compared to the end-to-end approach, where the best WER was 27.53\%. The work presented here has implications for building future recognition systems that operate independently of a large vocabulary, as typically used in tasks such as recognition of syllabic or agglutinative languages, out-of-vocabulary techniques, keyword search indexing, and medical speech processing.}, language = {en} }