@article{BockletNoethRiedhammer2023, author = {Bocklet, Tobias and N{\"o}th, Elmar and Riedhammer, Korbinian}, title = {K{\"u}nstliche Intelligenz f{\"u}r die Analyse pathologischer Sprache}, series = {Sprache - Stimme - Geh{\"o}r}, volume = {47}, journal = {Sprache - Stimme - Geh{\"o}r}, number = {3}, doi = {10.1055/a-2089-5778}, pages = {145 -- 150}, year = {2023}, abstract = {Speech can contain a wealth of diagnostically relevant information. This review article shows how methods of artificial intelligence, in particular machine learning and speech processing, can be applied to speech signals: to assess intelligibility, to automate standardized tests, and to determine medical scales and diagnoses. A concluding critical examination of acoustic features across a variety of pathologies gives reason to assume that these markers indeed contain diagnostically relevant information.}, language = {de} } @inproceedings{BraunBayerlPerezToroetal.2023, author = {Braun, Franziska and Bayerl, Sebastian P. and P{\'e}rez-Toro, Paula A. and H{\"o}nig, Florian and Lehfeld, Hartmut and Hillemacher, Thomas and N{\"o}th, Elmar and Bocklet, Tobias and Riedhammer, Korbinian}, title = {Classifying Dementia in the Presence of Depression}, doi = {10.48550/arXiv.2308.08306}, pages = {5}, year = {2023}, abstract = {Automated dementia screening enables early detection and intervention, reducing costs to healthcare systems and increasing quality of life for those affected. Depression shares symptoms with dementia, adding complexity to diagnoses. The research focus so far has been on binary classification of dementia (DEM) and healthy controls (HC) using speech from picture description tests from a single dataset. In this work, we apply established baseline systems to discriminate cognitive impairment in speech from the semantic Verbal Fluency Test and the Boston Naming Test using text, audio, and emotion embeddings in a 3-class classification problem (HC vs. MCI vs. DEM). We perform cross-corpus and mixed-corpus experiments on two independently recorded German datasets to investigate generalization to larger populations and different recording conditions. In a detailed error analysis, we look at depression as a secondary diagnosis to understand what our classifiers actually learn.}, language = {en} } @inproceedings{PerezToroBayerlAriasVergaraetal.2021, author = {P{\'e}rez-Toro, Paula A. and Bayerl, Sebastian P. and Arias-Vergara, Tomas and V{\'a}squez-Correa, Juan Camilo and Klumpp, Philipp and Schuster, Maria and N{\"o}th, Elmar and Orozco-Arroyave, Juan R. and Riedhammer, Korbinian}, title = {Influence of the Interviewer on the Automatic Assessment of Alzheimer's Disease in the Context of the ADReSSo Challenge}, issn = {2958-1796}, doi = {10.21437/Interspeech.2021-1589}, pages = {3785 -- 3789}, year = {2021}, abstract = {Alzheimer's Disease (AD) results from the progressive loss of neurons in the hippocampus, which affects the capability to produce coherent language. It affects lexical, grammatical, and semantic processes as well as speech fluency. This paper considers the analysis of speech and language for the assessment of AD in the context of the Alzheimer's Dementia Recognition through Spontaneous Speech (ADReSSo) 2021 challenge.
We propose to extract acoustic features such as X-vectors, prosody, and emotional embeddings as well as linguistic features such as perplexity and word embeddings. The data consist of speech recordings from AD patients and healthy controls. The transcriptions are obtained using a commercial automatic speech recognition system. We outperform the baseline results on the test set, both for the classification and for the Mini-Mental State Examination (MMSE) prediction, achieving a classification accuracy of 80\% and an RMSE of 4.56 in the regression. Additionally, we find strong evidence for the influence of the interviewer on classification results: in cross-validation on the training set, we achieve 85\% classification accuracy using the combined speech of the interviewer and the participant, and still 78\% accuracy using interviewer speech only.}, language = {en} } @inproceedings{WagnerBaumannBraunetal.2023, author = {Wagner, Dominik and Baumann, Ilja and Braun, Franziska and Bayerl, Sebastian P. and N{\"o}th, Elmar and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Multi-class Detection of Pathological Speech with Latent Features}, issn = {2958-1796}, doi = {10.21437/Interspeech.2023-464}, pages = {2318 -- 2322}, year = {2023}, abstract = {The detection of pathologies from speech features is usually defined as a binary classification task, with one class representing a specific pathology and the other class representing healthy speech. In this work, we train neural networks, large margin classifiers, and tree boosting machines to distinguish between four pathologies: Parkinson's disease, laryngeal cancer, cleft lip and palate, and oral squamous cell carcinoma. We show that latent representations extracted at different layers of a pre-trained wav2vec 2.0 system can be effectively used to classify these types of pathological voices. We evaluate the robustness of our classifiers by adding room impulse responses to the test data and by applying them to unseen speech corpora. Our approach achieves unweighted average F1 scores between 74.1\% and 97.0\%, depending on the model and the noise conditions used. The systems generalize and perform well on unseen data of healthy speakers sampled from a variety of different sources.}, language = {en} } @article{BayerlGerczukBatlineretal.2023, author = {Bayerl, Sebastian P. and Gerczuk, Maurice and Batliner, Anton and Bergler, Christian and Amiriparian, Shahin and Schuller, Bj{\"o}rn and N{\"o}th, Elmar and Riedhammer, Korbinian}, title = {Classification of Stuttering - The ComParE challenge and beyond}, series = {Computer Speech \& Language}, volume = {81}, journal = {Computer Speech \& Language}, doi = {10.1016/j.csl.2023.101519}, pages = {20}, year = {2023}, abstract = {The ACM Multimedia 2022 Computational Paralinguistics Challenge (ComParE) featured a sub-challenge on the classification of stuttering in order to bring attention to this important topic and engage a wider research community. Stuttering is a complex speech disorder characterized by blocks, prolongations of sounds and syllables, and repetitions of sounds and words. Accurately classifying the symptoms of stuttering has implications for the development of self-help tools and specialized automatic speech recognition (ASR) systems that can handle atypical speech patterns.
This paper provides a review of the challenge contributions, improves upon them with new state-of-the-art classification results for the KSF-C dataset, and explores cross-language training to demonstrate the potential of datasets in multiple languages. To facilitate further research and reproducibility, the full KSF-C dataset, including test-set labels, is also released.}, language = {en} } @inproceedings{BaumannWagnerRiedhammeretal.2023, author = {Baumann, Ilja and Wagner, Dominik and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias}, title = {Detection of Vowel Errors in Children's Speech Using Synthetic Phonetic Transcripts}, series = {2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), Taipei, Taiwan, 2023}, booktitle = {2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), Taipei, Taiwan, 2023}, doi = {10.1109/ASRU57964.2023.10389704}, pages = {1 -- 8}, year = {2023}, abstract = {The analysis of phonological processes is crucial in evaluating speech development disorders in children, but it is hampered by the limited availability of children's audio data. This work focuses on automatic vowel error detection using a two-stage pipeline. The first stage uses a fine-tuned cross-lingual phone recognizer (wav2vec 2.0) to extract phone sequences from audio. The second stage employs a language model (BERT) to classify the phone sequence; it is trained entirely on synthetic transcripts to counteract the very broad range of potential mistakes. We evaluate the system on nonword audio recordings from a speech development test, recited by preschool children. The results show that the classifier trained on synthetic data performs well, but its efficacy relies on the quality of the phone recognizer. The best classifier achieves a 94.7\% F1 score when evaluated against phonetic ground truths, whereas the F1 score is 76.2\% when using automatically recognized phone sequences.}, language = {en} }
@inproceedings{BaumannWagnerSchusteretal.2024, author = {Baumann, Ilja and Wagner, Dominik and Schuster, Maria and N{\"o}th, Elmar and Bocklet, Tobias}, title = {Towards Interpretability of Automatic Phoneme Analysis in Cleft Lip and Palate Speech}, series = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, booktitle = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, doi = {10.1109/ICASSP48485.2024.10447632}, pages = {12602 -- 12606}, year = {2024}, abstract = {Cleft lip and palate ranks among the most common congenital abnormalities and significantly influences speech articulation, resulting in varying phonemic impacts. In a clinical context, a detailed diagnosis is carried out through time-consuming perceptual evaluations. We use perceptual ratings of different articulatory modifications at the phoneme level as ground truth and propose a system based on wav2vec 2.0, trained on the downstream task of classifying phonemic criteria as a multi-class and multi-label problem. The system is trained for detection at the utterance level, without the use of phoneme labels. To gain a clearer understanding of which areas of the speech signal have the greatest impact on classification, we assess the extent to which our system aligns with expert ratings at the phoneme level. Additionally, we examine which specific phonemes play a decisive role in determining the final classification of the labeled criteria. Using feature relevance explanation methods, we show that the salient phonemes marked by experts contribute considerably more to the classification of the correct class. To the best of our knowledge, this is the first study to combine utterance-level classification of various articulatory modifications with phoneme-level interpretation, offering a more comprehensive understanding for potential clinical applications.}, language = {en} } @inproceedings{BayerlGudenbergHoenigetal.2022, author = {Bayerl, Sebastian P. and Wolff von Gudenberg, Alexander and H{\"o}nig, Florian and N{\"o}th, Elmar and Riedhammer, Korbinian}, title = {KSoF: The Kassel State of Fluency Dataset}, doi = {10.48550/arXiv.2203.05383}, pages = {8}, year = {2022}, abstract = {Stuttering is a complex speech disorder that negatively affects an individual's ability to communicate effectively. Persons who stutter (PWS) often suffer considerably under the condition and seek help through therapy. Fluency shaping is a therapy approach in which PWS learn to modify their speech to help them overcome their stutter. Mastering such speech techniques takes time and practice, even after therapy. Success is rated highly shortly after therapy, but relapse rates are high. The ability to detect stuttering events and speech modifications could help PWS and speech pathologists monitor speech behavior over a long period and track the level of fluency. Monitoring could create the ability to intervene early by detecting lapses in fluency. To the best of our knowledge, no public dataset is available that contains speech from people who underwent stuttering therapy that changed their style of speaking.
This work introduces the Kassel State of Fluency (KSoF), a therapy-based dataset containing over 5500 clips of PWS. The clips were labeled with six stuttering-related event types: blocks, prolongations, sound repetitions, word repetitions, interjections, and - specific to therapy - speech modifications. The audio was recorded during therapy sessions at the Institut der Kasseler Stottertherapie. The data will be made available for research purposes upon request.}, language = {en} } @article{PerezToroVasquezCorreaBockletetal.2023, author = {P{\'e}rez-Toro, Paula Andrea and V{\'a}squez-Correa, Juan Camilo and Bocklet, Tobias and N{\"o}th, Elmar and Orozco-Arroyave, Juan Rafael}, title = {User State Modeling Based on the Arousal-Valence Plane: Applications in Customer Satisfaction and Health-Care}, series = {IEEE Transactions on Affective Computing}, volume = {14}, journal = {IEEE Transactions on Affective Computing}, number = {2}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, issn = {1949-3045}, doi = {10.1109/taffc.2021.3112543}, pages = {1533 -- 1546}, year = {2023}, abstract = {Acoustic analysis helps to discriminate emotions based on non-verbal information, while linguistic analysis aims to capture verbal information from written sources. Acoustic and linguistic analyses can be used for different applications where information related to emotions, mood, or affect is involved. The Arousal-Valence plane is commonly used to model emotional states in a multidimensional space. This study proposes a methodology for modeling the user's state based on the Arousal-Valence plane in different scenarios. Acoustic and linguistic information is used as input to feed different deep learning architectures, mainly based on convolutional and recurrent neural networks, which are trained to model the Arousal-Valence plane. The proposed approach is used for the evaluation of customer satisfaction in call centers and for health-care applications in the assessment of depression in Parkinson's disease and the discrimination of Alzheimer's disease. F-scores of up to 0.89 are obtained for customer satisfaction, of up to 0.82 for depression in Parkinson's patients, and of up to 0.80 for Alzheimer's patients. The proposed approach confirms that there is information embedded in the Arousal-Valence plane that can be used for different purposes.}, language = {en} } @inproceedings{ScheuererHaderleinNoethetal.2021, author = {Scheuerer, Ralph and Haderlein, Tino and N{\"o}th, Elmar and Bocklet, Tobias}, title = {Applying X-Vectors on Pathological Speech After Larynx Removal}, series = {2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)}, volume = {2021}, booktitle = {2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)}, publisher = {IEEE}, isbn = {978-1-6654-3739-4}, doi = {10.1109/asru51503.2021.9688278}, pages = {1079 -- 1086}, year = {2021}, abstract = {Speaker embeddings extracted from time-delay neural networks (TDNNs) contributed to major recent advancements in speaker recognition and verification. We use an X-Vector system trained on augmented VoxCeleb1 and VoxCeleb2 data to obtain embeddings for pathological speech after total or partial larynx removal. We show that our model is able to effectively distinguish and visualize patient groups via the generated embeddings.
We further compare various regression models on the task of automatically predicting different perceptual ratings given by speech therapists (intelligibility, vocal effort, and overall quality) based on the extracted speaker embeddings. For both patient groups, we show Pearson correlations of around +0.8; we find that Random Forest and Support Vector Regression produce scores that best resemble the experts' assessments.}, language = {en} } @inproceedings{KlumppBockletAriasVergaraetal.2021, author = {Klumpp, Philipp and Bocklet, Tobias and Arias-Vergara, Tomas and V{\'a}squez-Correa, Juan Camilo and P{\'e}rez-Toro, Paula Andrea and Bayerl, Sebastian P. and Orozco-Arroyave, Juan Rafael and N{\"o}th, Elmar}, title = {The Phonetic Footprint of Covid-19?}, series = {Interspeech 2021}, booktitle = {Interspeech 2021}, publisher = {ISCA}, address = {ISCA}, issn = {2958-1796}, doi = {10.21437/Interspeech.2021-1488}, pages = {441 -- 445}, year = {2021}, abstract = {Against the background of the ongoing pandemic, this year's Computational Paralinguistics Challenge featured a classification problem to detect Covid-19 from speech recordings. The presented approach is based on a phonetic analysis of speech samples; it thus enabled us not only to discriminate between Covid and non-Covid samples, but also to better understand how the condition influenced an individual's speech signal. Our deep acoustic model was trained on datasets collected exclusively from healthy speakers. It served as a tool for segmentation and feature extraction on the samples from the challenge dataset. Distinct patterns were found in the embeddings of phonetic classes that have their place of articulation deep inside the vocal tract. We observed profound differences in classification results for the development and test splits, similar to the baseline method. We concluded that, based on our phonetic findings, it was safe to assume that our classifier was able to reliably detect a pathological condition located in the respiratory tract. However, we found no evidence to claim that the system was able to discriminate between Covid-19 and other respiratory diseases.}, language = {en} } @incollection{BayerlWagnerNoethetal.2022, author = {Bayerl, Sebastian P. and Wagner, Dominik and N{\"o}th, Elmar and Bocklet, Tobias and Riedhammer, Korbinian}, title = {The Influence of Dataset Partitioning on Dysfluency Detection Systems}, series = {Text, Speech, and Dialogue}, booktitle = {Text, Speech, and Dialogue}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {9783031162695}, issn = {0302-9743}, doi = {10.1007/978-3-031-16270-1_35}, pages = {423 -- 436}, year = {2022}, abstract = {This paper empirically investigates the influence of different data splits and splitting strategies on the performance of dysfluency detection systems. For this, we perform experiments using wav2vec 2.0 models with a classification head as well as support vector machines (SVM) in conjunction with the features extracted from the wav2vec 2.0 model to detect dysfluencies. We train and evaluate the systems with different non-speaker-exclusive and speaker-exclusive splits of the Stuttering Events in Podcasts (SEP-28k) dataset to shed some light on the variability of results w.r.t. the partition method used. Furthermore, we show that the SEP-28k dataset is dominated by only a few speakers, making it difficult to evaluate.
To remedy this problem, we created SEP-28k-Extended (SEP-28k-E), containing semi-automatically generated speaker and gender information for the SEP-28k corpus, and suggest different data splits, each useful for evaluating different aspects of dysfluency detection methods.}, language = {en} } @inproceedings{WagnerBayerlBaumannetal.2024, author = {Wagner, Dominik and Bayerl, Sebastian P. and Baumann, Ilja and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias}, title = {Large Language Models for Dysfluency Detection in Stuttered Speech}, doi = {10.48550/arXiv.2406.11025}, pages = {6}, year = {2024}, abstract = {Accurately detecting dysfluencies in spoken language can help to improve the performance of automatic speech and language processing components and support the development of more inclusive speech and language technologies. Inspired by the recent trend towards the deployment of large language models (LLMs) as universal learners and processors of non-lexical inputs, such as audio and video, we approach the task of multi-label dysfluency detection as a language modeling problem. We present hypothesis candidates generated with an automatic speech recognition system and acoustic representations extracted from an audio encoder model to an LLM, and finetune the system to predict dysfluency labels on three datasets containing English and German stuttered speech. The experimental results show that our system effectively combines acoustic and lexical information and achieves competitive results on the multi-label stuttering detection task.}, language = {en} } @inproceedings{BaumannWagnerSchusteretal.2024a, author = {Baumann, Ilja and Wagner, Dominik and Schuster, Maria and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias}, title = {Towards Self-Attention Understanding for Automatic Articulatory Processes Analysis in Cleft Lip and Palate Speech}, doi = {10.21437/Interspeech.2024-2134}, pages = {2430 -- 2434}, year = {2024}, abstract = {Cleft lip and palate (CLP) speech presents unique challenges for automatic phoneme analysis due to its distinct acoustic characteristics and articulatory anomalies. We perform phoneme analysis in CLP speech using a pre-trained wav2vec 2.0 model with a multi-head self-attention classification module to capture long-range dependencies within the speech signal, thereby enabling a better contextual understanding of phoneme sequences. We demonstrate the effectiveness of our approach in the classification of various articulatory processes in CLP speech. Furthermore, we investigate the interpretability of self-attention to gain insights into the model's understanding of CLP speech characteristics. Our findings highlight the potential of self-attention mechanisms for improving automatic phoneme analysis in CLP speech, paving the way for enhanced diagnostics and adding interpretability for therapists and affected patients.}, language = {en} } @inproceedings{EscobarGrisalesRiosUrregoBaumannetal.2024, author = {Escobar-Grisales, Daniel and R{\'i}os-Urrego, Cristian-David and Baumann, Ilja and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias and Garc{\'i}a, Adolfo and Orozco-Arroyave, Juan Rafael}, title = {It's Time to Take Action: Acoustic Modeling of Motor Verbs to Detect Parkinson's Disease}, doi = {10.21437/Interspeech.2024-2205}, year = {2024}, abstract = {Pre-trained models generate speech representations that are used in different tasks, including the automatic detection of Parkinson's disease (PD).
Although these models can yield high accuracy, their interpretation is still challenging. This paper uses a pre-trained wav2vec 2.0 model to represent speech frames of 25 ms length and performs a frame-by-frame discrimination between PD patients and healthy control (HC) subjects. This fine-grained prediction enables us to identify specific linguistic segments with high discriminative capability. Speech representations of all produced verbs were compared with those of nouns, and the former yielded higher accuracies. To gain a deeper understanding of this pattern, representations of motor and non-motor verbs were compared, and the former yielded better results, with accuracies of around 83\% on an independent test set. These findings support well-established neurocognitive models that highlight action-related language as a key marker of PD.}, language = {en} } @inproceedings{WagnerBaumannEngertetal.2025, author = {Wagner, Dominik and Baumann, Ilja and Engert, Natalie and Lee, Seanie and N{\"o}th, Elmar and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Personalized Fine-Tuning with Controllable Synthetic Speech from LLM-Generated Transcripts for Dysarthric Speech Recognition}, series = {Interspeech 2025}, booktitle = {Interspeech 2025}, publisher = {ISCA}, address = {ISCA}, issn = {2958-1796}, doi = {10.21437/Interspeech.2025-2155}, pages = {3294 -- 3298}, year = {2025}, abstract = {In this work, we present our submission to the Speech Accessibility Project challenge for dysarthric speech recognition. We integrate parameter-efficient fine-tuning with latent audio representations to improve an encoder-decoder ASR system. Synthetic training data is generated by fine-tuning Parler-TTS to mimic dysarthric speech, using LLM-generated prompts for corpus-consistent target transcripts. Personalization with x-vectors consistently reduces word error rates (WERs) over non-personalized fine-tuning. AdaLoRA adapters outperform full fine-tuning and standard low-rank adaptation, achieving relative WER reductions of ∼23\% and ∼22\%, respectively. Further improvements (∼5\% WER reduction) come from incorporating wav2vec 2.0-based audio representations. Training with synthetic dysarthric speech yields up to ∼7\% relative WER improvement over personalized fine-tuning alone.}, language = {en} }