@inproceedings{BayerlWagnerNoethetal.2022,
  author   = {Bayerl, Sebastian P. and Wagner, Dominik and N{\"o}th, Elmar and Riedhammer, Korbinian},
  title    = {Detecting Dysfluencies in Stuttering Therapy Using wav2vec 2.0},
  doi      = {10.48550/arXiv.2204.03417},
  pages    = {5},
  year     = {2022},
  abstract = {Stuttering is a varied speech disorder that impairs an individual's ability to communicate. Persons who stutter (PWS) often use speech therapy to cope with their condition. Improving speech recognition systems for people with such non-typical speech or tracking the effectiveness of speech therapy would require systems that can detect dysfluencies while also recognizing the speech techniques acquired in therapy. This paper shows that fine-tuning wav2vec 2.0 [1] for the classification of stuttering on a sizeable English corpus containing stuttered speech, in conjunction with multi-task learning, boosts the effectiveness of the general-purpose wav2vec 2.0 features for detecting stuttering in speech, both within and across languages. We evaluate our method on FluencyBank [2] and the German therapy-centric Kassel State of Fluency (KSoF) [3] dataset by training Support Vector Machine classifiers on features extracted from the fine-tuned models for six different stuttering-related event types: blocks, prolongations, sound repetitions, word repetitions, interjections, and - specific to therapy - speech modifications. Using embeddings from the fine-tuned models leads to relative classification performance gains of up to 27\% w.r.t. F1-score.},
  language = {en}
}

@inproceedings{BaumannWagnerBayerletal.2022,
  author    = {Baumann, Ilja and Wagner, Dominik and Bayerl, Sebastian P. and Bocklet, Tobias},
  title     = {Nonwords Pronunciation Classification in Language Development Tests for Preschool Children},
  series    = {Interspeech 2022: 18-22 September 2022, Incheon, Korea},
  booktitle = {Interspeech 2022: 18-22 September 2022, Incheon, Korea},
  doi       = {10.21437/Interspeech.2022-10777},
  pages     = {3643 -- 3647},
  year      = {2022},
  abstract  = {This work aims to automatically evaluate whether the language development of children is age-appropriate. Validated speech and language tests are used for this purpose to test auditory memory. In this work, the task is to determine whether spoken nonwords have been uttered correctly. We compare different approaches, each motivated by modeling specific language structures: low-level features (FFT), speaker embeddings (ECAPA-TDNN), grapheme-motivated embeddings (wav2vec 2.0), and phonetic embeddings in the form of senones (ASR acoustic model). Each of the approaches provides input for VGG-like 5-layer CNN classifiers. We also examine adaptation per nonword. The evaluation of the proposed systems was performed using recordings of spoken nonwords from different kindergartens. ECAPA-TDNN and low-level FFT features do not explicitly model phonetic information; wav2vec 2.0 is trained on grapheme labels, while our ASR acoustic model features contain (sub-)phonetic information. We found that the more granular the phonetic modeling, the higher the achieved recognition rates. The best system, trained on ASR acoustic model features with VTLN, achieved an accuracy of 89.4\% and an area under the ROC (Receiver Operating Characteristic) curve (AUC) of 0.923.
This corresponds to relative improvements of 20.2\% in accuracy and 0.309 in AUC compared to the FFT baseline.},
  language  = {en}
}

@inproceedings{BayerlTammewarRiedhammeretal.2021,
  author   = {Bayerl, Sebastian P. and Tammewar, Aniruddha and Riedhammer, Korbinian and Riccardi, Giuseppe},
  title    = {Detecting Emotion Carriers by Combining Acoustic and Lexical Representations},
  doi      = {10.48550/arXiv.2112.06603},
  pages    = {8},
  year     = {2021},
  abstract = {Personal narratives (PN) - spoken or written - are recollections of facts, people, events, and thoughts from one's own experience. Emotion recognition and sentiment analysis tasks are usually defined at the utterance or document level. However, in this work, we focus on Emotion Carriers (EC), defined as the segments (speech or text) that best explain the emotional state of the narrator ("loss of father", "made me choose"). Once extracted, such EC can provide a richer representation of the user state to improve natural language understanding and dialogue modeling. Previous work has shown that EC can be identified using lexical features. However, spoken narratives should provide a richer description of the context and the user's emotional state. In this paper, we leverage word-based acoustic and textual embeddings as well as early and late fusion techniques for the detection of EC in spoken narratives. For the acoustic word-level representations, we use Residual Neural Networks (ResNet) pretrained on separate speech emotion corpora and fine-tuned to detect EC. Experiments with different fusion and system combination strategies show that late fusion leads to significant improvements for this task.},
  language = {en}
}

@inproceedings{BayerlWagnerNoethetal.2022a,
  author    = {Bayerl, Sebastian P. and Wagner, Dominik and N{\"o}th, Elmar and Bocklet, Tobias and Riedhammer, Korbinian},
  editor    = {Sojka, Petr and Kopeček, Ivan and Pala, Karel and Hor{\'a}k, Aleš},
  title     = {The Influence of Dataset Partitioning on Dysfluency Detection Systems},
  publisher = {Springer International Publishing},
  doi       = {10.48550/arXiv.2206.03400},
  pages     = {14},
  year      = {2022},
  abstract  = {This paper empirically investigates the influence of different data splits and splitting strategies on the performance of dysfluency detection systems. For this, we perform experiments using wav2vec 2.0 models with a classification head as well as support vector machines (SVM) in conjunction with features extracted from the wav2vec 2.0 model to detect dysfluencies. We train and evaluate the systems with different non-speaker-exclusive and speaker-exclusive splits of the Stuttering Events in Podcasts (SEP-28k) dataset to shed some light on the variability of results w.r.t. the partition method used. Furthermore, we show that the SEP-28k dataset is dominated by only a few speakers, making it difficult to evaluate.
To remedy this problem, we created SEP-28k-Extended (SEP-28k-E), containing semi-automatically generated speaker and gender information for the SEP-28k corpus, and suggest different data splits, each useful for evaluating different aspects of dysfluency detection methods.},
  language  = {en}
}

@inproceedings{BayerlFrasettoJauernigetal.2020,
  author   = {Bayerl, Sebastian P. and Frassetto, Tommaso and Jauernig, Patrick and Riedhammer, Korbinian and Sadeghi, Ahmad-Reza and Schneider, Thomas and Stapf, Emmanuel and Weinert, Christian},
  title    = {Offline Model Guard},
  doi      = {10.48550/arXiv.2007.02351},
  pages    = {6},
  year     = {2020},
  abstract = {Performing machine learning tasks in mobile applications yields a challenging conflict of interest: highly sensitive client information (e.g., speech data) must remain private, while the intellectual property of service providers (e.g., model parameters) must also be protected. Cryptographic techniques offer secure solutions for this but incur unacceptable overhead and, moreover, require frequent network interaction. In this work, we design a practically efficient hardware-based solution. Specifically, we build Offline Model Guard (OMG) to enable privacy-preserving machine learning on the predominant mobile computing platform ARM - even in offline scenarios. By leveraging a trusted execution environment for strict hardware-enforced isolation from other system components, OMG guarantees privacy of client data, secrecy of provided models, and integrity of processing algorithms. Our prototype implementation on an ARM HiKey 960 development board performs privacy-preserving keyword recognition using TensorFlow Lite for Microcontrollers in real time.},
  language = {en}
}

@inproceedings{BayerlGudenbergHoenigetal.2022,
  author   = {Bayerl, Sebastian P. and Gudenberg, Alexander Wolff von and H{\"o}nig, Florian and N{\"o}th, Elmar and Riedhammer, Korbinian},
  title    = {KSoF: The Kassel State of Fluency Dataset},
  doi      = {10.48550/arXiv.2203.05383},
  pages    = {8},
  year     = {2022},
  abstract = {Stuttering is a complex speech disorder that negatively affects an individual's ability to communicate effectively. Persons who stutter (PWS) often suffer considerably from the condition and seek help through therapy. Fluency shaping is a therapy approach in which PWS learn to modify their speech to help them overcome their stutter. Mastering such speech techniques takes time and practice, even after therapy. Success is typically rated highly shortly after therapy, but relapse rates are high. To monitor speech behavior over a long time, the ability to detect stuttering events and speech modifications could help PWS and speech pathologists track the level of fluency. Monitoring could also make it possible to intervene early by detecting lapses in fluency. To the best of our knowledge, no public dataset is available that contains speech from people who underwent stuttering therapy that changed their style of speaking. This work introduces the Kassel State of Fluency (KSoF), a therapy-based dataset containing over 5500 clips of speech from PWS. The clips were labeled with six stuttering-related event types: blocks, prolongations, sound repetitions, word repetitions, interjections, and - specific to therapy - speech modifications. The audio was recorded during therapy sessions at the Institut der Kasseler Stottertherapie.
The data will be made available for research purposes upon request.},
  language = {en}
}

@inproceedings{RiedhammerBayerl2019,
  author   = {Riedhammer, Korbinian and Bayerl, Sebastian P.},
  title    = {A Comparison of Hybrid and End-to-End Models for Syllable Recognition},
  doi      = {10.1007/978-3-030-27947-9_30},
  pages    = {8},
  year     = {2019},
  abstract = {This paper presents a comparison of a traditional hybrid speech recognition system (Kaldi, using WFSTs and a TDNN trained with lattice-free MMI) and a lexicon-free end-to-end model (a TensorFlow implementation of a multi-layer LSTM with CTC training) for German syllable recognition on the Verbmobil corpus. The results show that explicitly modeling prior knowledge is still valuable in building recognition systems. With a strong language model (LM) based on syllables, the structured approach significantly outperforms the end-to-end model. The best word error rate (WER) w.r.t. syllables was achieved using Kaldi with a 4-gram LM modeling all syllables observed in the training set: 10.0\%, compared to a best WER of 27.53\% for the end-to-end approach. The work presented here has implications for building future recognition systems that operate independently of a large vocabulary, as typically required in tasks such as the recognition of syllabic or agglutinative languages, out-of-vocabulary techniques, keyword search indexing, and medical speech processing.},
  language = {en}
}

@inproceedings{BraunErzigkeitLehfeldetal.2022,
  author    = {Braun, Franziska and Erzigkeit, Andreas and Lehfeld, Hartmut and Hillemacher, Thomas and Riedhammer, Korbinian and Bayerl, Sebastian P.},
  editor    = {Sojka, Petr and Kopeček, Ivan and Pala, Karel and Hor{\'a}k, Aleš},
  title     = {Going Beyond the Cookie Theft Picture Test},
  series    = {Lecture Notes in Computer Science ; 13502},
  booktitle = {Lecture Notes in Computer Science ; 13502},
  publisher = {Springer International Publishing},
  isbn      = {978-3-031-16270-1},
  doi       = {10.48550/arXiv.2206.05018},
  pages     = {437 -- 448},
  year      = {2022},
  language  = {en}
}

@inproceedings{PerezToroBayerlAriasVergaraetal.2021,
  author   = {Perez-Toro, Paula A. and Bayerl, Sebastian P. and Arias-Vergara, Tomas and Vasquez-Correa, Juan Camilo and Klumpp, Philipp and Schuster, Maria and N{\"o}th, Elmar and Orozco-Arroyave, Juan R. and Riedhammer, Korbinian},
  title    = {Influence of the Interviewer on the Automatic Assessment of Alzheimer's Disease in the Context of the ADReSSo Challenge},
  issn     = {2958-1796},
  doi      = {10.21437/Interspeech.2021-1589},
  pages    = {3785 -- 3789},
  year     = {2021},
  abstract = {Alzheimer's Disease (AD) results from the progressive loss of neurons in the hippocampus, which affects the capability to produce coherent language. It affects lexical, grammatical, and semantic processes as well as speech fluency. This paper considers the analysis of speech and language for the assessment of AD in the context of the Alzheimer's Dementia Recognition through Spontaneous Speech (ADReSSo) 2021 challenge. We propose to extract acoustic features such as X-vectors, prosody, and emotional embeddings as well as linguistic features such as perplexity and word embeddings. The data consist of speech recordings from AD patients and healthy controls. The transcriptions are obtained using a commercial automatic speech recognition system. We outperform the baseline results on the test set, both for the classification task and for the Mini-Mental State Examination (MMSE) prediction. We achieved a classification accuracy of 80\% and an RMSE of 4.56 in the regression.
Additionally, we found strong evidence for the influence of the interviewer on classification results: in cross-validation on the training set, we achieve an accuracy of 85\% using the combined speech of the interviewer and the participant, and still 78\% using interviewer speech alone.},
  language = {en}
}