@article{FreierBockletHeltenetal.2023,
  author = {Freier, Carolin and Bocklet, Tobias and Helten, Anne-Kathrin and Hoffmann, Franziska and Hunger, Marianne and Kov{\'a}cs, L{\'a}szl{\'o} and Richter, Florian and Riedhammer, Korbinian and Schmohl, Tobias and Simon, Claudia},
  title = {Wie kann videogest{\"u}tztes Lernen die Erwartungen Studierender und Dozierender erf{\"u}llen?},
  series = {Soziale Passagen},
  volume = {15},
  journal = {Soziale Passagen},
  number = {2},
  publisher = {Springer VS},
  issn = {1867-0180},
  doi = {10.1007/s12592-023-00478-0},
  pages = {631 -- 635},
  year = {2023},
  abstract = {Im BMBF-Verbundprojekt HAnS entwickeln und implementieren neun Hochschulen sowie drei hochschul{\"u}bergreifende Einrichtungen ein intelligentes Hochschul-Assistenz-System als Open-Source-L{\"o}sung. Videobasierte Lehrmaterialien werden verschriftlicht und indexiert, sodass Stichwortsuchen m{\"o}glich sind; geplant ist, {\"u}ber einen KI-Tutor automatisiert {\"U}bungsaufgaben zu generieren. Studierende sollen so in ihrem Selbststudium digital unterst{\"u}tzt werden. Die technische Entwicklung wird interdisziplin{\"a}r - auch sozialwissenschaftlich und p{\"a}dagogisch - begleitet und in einem iterativen Vorgehen evidenzbasiert entsprechend Design-Based-Research angepasst. Wissen und Wertesystem der Anwender*innen, Didaktik, Ethik, Akzeptanz und Datenschutz werden dabei in den Entwicklungsprozess einbezogen.},
  language = {de}
}

@article{SchusterAriasVergaraMuellerHoerneretal.2020,
  author = {Schuster, Maria and Arias-Vergara, Tomas and M{\"u}ller-H{\"o}rner, Rainer and Winterholler, Cordula and Bocklet, Tobias},
  title = {Verstehen mich mit der Maske noch alle?},
  series = {MMW - Fortschritte der Medizin},
  volume = {162},
  journal = {MMW - Fortschritte der Medizin},
  number = {14},
  doi = {10.1007/s15006-020-0749-4},
  pages = {42 -- 44},
  year = {2020},
  abstract = {In Arztpraxen nimmt der Infektionsschutz mittels Mund-Nasen-Maske momentan eine zentrale Rolle ein. Wenig nachgedacht wird dabei {\"u}ber die Folgen f{\"u}r die Kommunikation. Gerade schwerh{\"o}rigen Patienten bereiten ged{\"a}mpfte Stimmen und abgedeckte Lippen Probleme.},
  language = {de}
}

@article{BockletNoethRiedhammer2023,
  author = {Bocklet, Tobias and N{\"o}th, Elmar and Riedhammer, Korbinian},
  title = {K{\"u}nstliche Intelligenz f{\"u}r die Analyse pathologischer Sprache},
  series = {Sprache - Stimme - Geh{\"o}r},
  volume = {47},
  journal = {Sprache - Stimme - Geh{\"o}r},
  number = {3},
  doi = {10.1055/a-2089-5778},
  pages = {145 -- 150},
  year = {2023},
  abstract = {Sprache kann eine Vielzahl von diagnostisch relevanten Informationen enthalten. In diesem {\"U}bersichtsartikel wird aufgezeigt, wie Methoden der K{\"u}nstlichen Intelligenz, insbesondere Maschinelles Lernen und Sprachverarbeitung, auf Sprachsignale angewendet werden k{\"o}nnen: zur Bewertung von Verst{\"a}ndlichkeit, zur Automatisierung von standardisierten Tests und zur Bestimmung medizinischer Skalen und Diagnosen. Eine abschließende kritische Betrachtung von akustischen Merkmalen {\"u}ber eine Vielzahl von Pathologien gibt Grund zur Annahme, dass diese Marker tats{\"a}chlich diagnostisch relevante Informationen enthalten.},
  language = {de}
}
and H{\"o}nig, Florian and Lehfeld, Hartmut and Hillemacher, Thomas and N{\"o}th, Elmar and Bocklet, Tobias and Riedhammer, Korbinian}, title = {Classifying Dementia in the Presence of Depression}, doi = {10.48550/arXiv.2308.08306}, pages = {5}, year = {2023}, abstract = {Automated dementia screening enables early detection and intervention, reducing costs to healthcare systems and increasing quality of life for those affected. Depression has shared symptoms with dementia, adding complexity to diagnoses. The research focus so far has been on binary classification of dementia (DEM) and healthy controls (HC) using speech from picture description tests from a single dataset. In this work, we apply established baseline systems to discriminate cognitive impairment in speech from the semantic Verbal Fluency Test and the Boston Naming Test using text, audio and emotion embeddings in a 3-class classification problem (HC vs. MCI vs. DEM). We perform cross-corpus and mixed-corpus experiments on two independently recorded German datasets to investigate generalization to larger populations and different recording conditions. In a detailed error analysis, we look at depression as a secondary diagnosis to understand what our classifiers actually learn.}, language = {en} } @inproceedings{RiedhammerBaumannBayerletal.2023, author = {Riedhammer, Korbinian and Baumann, Ilja and Bayerl, Sebastian P. and Bocklet, Tobias and Braun, Franziska and Wagner', Dominik}, title = {Medical Speech Processing for Diagnosis and Monitoring}, pages = {1417 -- 1420}, year = {2023}, abstract = {In recent years, speech processing for medical applications got significant traction. While pioneering work in the 1990ies focused on processing sustained vowels or isolated utterances, work in the 2000s already showed, that speech recognition systems, prosodic analysis and natural language processing be used to assess a large variety of speech pathologies.Here, we give an overview of how to classify selected speech pathologies including stuttering, language development, speech intelligibility after surgery, dementia and Alzheimers, depression and state-of-mind. While each of those poses a rather well-defined problem in a lab setting, we discuss the issues when integrating such methods in a clinical workflow such as diagnosis or monitoring. Starting from the question if such detectors can be used for general screening or rather as a specialist's tool, we explore the legal and privacy-related implications: patient-doctor conversations, working with children or demented seniors, bias towards examiner or patient, on-device vs. cloud processing.We conclude with a set of open questions that should be addressed to help bringing all this research from the lab to routine clinical use.}, language = {en} } @inproceedings{WagnerBaumannBraunetal.2023, author = {Wagner, Dominik and Baumann, Ilja and Braun, Franziska and Bayerl, Sebastian P. and N{\"o}th, Elmar and Riedhammer, Korbinian and Bocklet, Tobias}, title = {Multi-class Detection of Pathological Speech with Latent Features}, issn = {2958-1796}, doi = {10.21437/Interspeech.2023-464}, pages = {2318 -- 2322}, year = {2023}, abstract = {The detection of pathologies from speech features is usually defined as a binary classification task with one class representing a specific pathology and the other class representing healthy speech. 
@inproceedings{WagnerBaumannBraunetal.2023,
  author = {Wagner, Dominik and Baumann, Ilja and Braun, Franziska and Bayerl, Sebastian P. and N{\"o}th, Elmar and Riedhammer, Korbinian and Bocklet, Tobias},
  title = {Multi-class Detection of Pathological Speech with Latent Features},
  issn = {2958-1796},
  doi = {10.21437/Interspeech.2023-464},
  pages = {2318 -- 2322},
  year = {2023},
  abstract = {The detection of pathologies from speech features is usually defined as a binary classification task, with one class representing a specific pathology and the other class representing healthy speech. In this work, we train neural networks, large margin classifiers, and tree boosting machines to distinguish between four pathologies: Parkinson's disease, laryngeal cancer, cleft lip and palate, and oral squamous cell carcinoma. We show that latent representations extracted at different layers of a pre-trained wav2vec 2.0 system can be effectively used to classify these types of pathological voices. We evaluate the robustness of our classifiers by adding room impulse responses to the test data and by applying them to unseen speech corpora. Our approach achieves unweighted average F1-scores between 74.1\% and 97.0\%, depending on the model and the noise conditions used. The systems generalize and perform well on unseen data of healthy speakers sampled from a variety of different sources.},
  language = {en}
}

@inproceedings{ChenHuangBocklet2020,
  author = {Chen, Wenda and Huang, Jonathan and Bocklet, Tobias},
  title = {Length- and Noise-aware Training Techniques for Short-utterance Speaker Recognition},
  doi = {10.48550/arXiv.2008.12218},
  pages = {5},
  year = {2020},
  abstract = {Speaker recognition performance has been greatly improved with the emergence of deep learning. Deep neural networks show the capacity to effectively deal with the impact of noise and reverberation, making them attractive for far-field speaker recognition systems. The x-vector framework is a popular choice for generating speaker embeddings in recent literature due to its robust training mechanism and excellent performance on various test sets. In this paper, we start with early work on including invariant representation learning (IRL) in the loss function and modify the approach with centroid alignment (CA) and length variability cost (LVC) techniques to further improve robustness in noisy, far-field applications. This work mainly focuses on improvements for short-duration test utterances (1-8 s). We also present improved results on long-duration tasks. In addition, this work discusses a novel self-attention mechanism. On the VOiCES far-field corpus, the combination of the proposed techniques achieves relative improvements in equal error rate (EER) of 7.0\% for extremely short and 8.2\% for full-duration test utterances over our baseline system.},
  language = {en}
}

@inproceedings{WagnerBaumannBayerletal.2023,
  author = {Wagner, Dominik and Baumann, Ilja and Bayerl, Sebastian P. and Riedhammer, Korbinian and Bocklet, Tobias},
  title = {Speaker Adaptation for End-To-End Speech Recognition Systems in Noisy Environments},
  doi = {10.48550/arXiv.2211.08774},
  pages = {6},
  year = {2023},
  abstract = {We analyze the impact of speaker adaptation in end-to-end automatic speech recognition models based on transformers and wav2vec 2.0 under different noise conditions. By including speaker embeddings obtained from x-vector and ECAPA-TDNN systems, as well as i-vectors, we achieve relative word error rate improvements of up to 16.3\% on LibriSpeech and up to 14.5\% on Switchboard. We show that the proven method of concatenating speaker vectors to the acoustic features and supplying them as auxiliary model inputs remains a viable option for increasing the robustness of end-to-end architectures. The effect on transformer models is stronger when more noise is added to the input speech. The most substantial benefits for systems based on wav2vec 2.0 are achieved under moderate or no noise conditions. Both x-vectors and ECAPA-TDNN embeddings outperform i-vectors as speaker representations. The optimal embedding size depends on the dataset and also varies with the noise condition.},
  language = {en}
}
@inproceedings{WagnerBayerlBockletetal.2023,
  author = {Wagner, Dominik and Bayerl, Sebastian P. and Bocklet, Tobias and Draxler, Christoph},
  title = {Implementing Easy-to-Use Recipes for the Switchboard Benchmark},
  publisher = {TUDpress, Dresden},
  pages = {150 -- 157},
  year = {2023},
  abstract = {We report on our contribution of templates for tokenization, language modeling, and automatic speech recognition (ASR) on the Switchboard benchmark to the open-source general-purpose toolkit SpeechBrain. Three recipes for the training of end-to-end ASR systems were implemented. We describe their model architectures as well as the necessary data preparation steps. The word error rates achievable with our models are comparable to or better than those of other popular toolkits. Pre-trained ASR models were made available on HuggingFace. They can be easily integrated into research projects or used directly for quick inference via a hosted inference API.},
  language = {en}
}

@inproceedings{BaumannWagnerRiedhammeretal.2023,
  author = {Baumann, Ilja and Wagner, Dominik and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias},
  title = {Detection of Vowel Errors in Children's Speech Using Synthetic Phonetic Transcripts},
  series = {2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), Taipei, Taiwan, 2023},
  booktitle = {2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU), Taipei, Taiwan, 2023},
  doi = {10.1109/ASRU57964.2023.10389704},
  pages = {1 -- 8},
  year = {2023},
  abstract = {The analysis of phonological processes is crucial in evaluating speech development disorders in children but faces challenges due to the limited availability of children's audio data. This work focuses on automatic vowel error detection using a two-stage pipeline. The first stage uses a fine-tuned cross-lingual phone recognizer (wav2vec 2.0) to extract phone sequences from audio. The second stage employs a language model (BERT) for classification from a phone sequence, trained entirely on synthetic transcripts to counteract the very broad range of potential mistakes. We evaluate the system on nonword audio recordings from a speech development test, recited by preschool children. The results show that the classifier trained on synthetic data performs well, but its efficacy relies on the quality of the phone recognizer. The best classifier achieves a 94.7\% F1 score when evaluated against phonetic ground truths, whereas the F1 score is 76.2\% when using automatically recognized phone sequences.},
  language = {en}
}

@inproceedings{BaumannWagnerBayerletal.2022,
  author = {Baumann, Ilja and Wagner, Dominik and Bayerl, Sebastian P. and Bocklet, Tobias},
  title = {Nonwords Pronunciation Classification in Language Development Tests for Preschool Children},
  series = {Interspeech 2022 : 18-22 September 2022, Incheon, Korea},
  booktitle = {Interspeech 2022 : 18-22 September 2022, Incheon, Korea},
  doi = {10.21437/Interspeech.2022-10777},
  pages = {3643 -- 3647},
  year = {2022},
  abstract = {This work aims to automatically evaluate whether the language development of children is age-appropriate. Validated speech and language tests are used for this purpose to test auditory memory. In this work, the task is to determine whether spoken nonwords have been uttered correctly. We compare different approaches motivated by modeling specific language structures: low-level features (FFT), speaker embeddings (ECAPA-TDNN), grapheme-motivated embeddings (wav2vec 2.0), and phonetic embeddings in the form of senones (ASR acoustic model). Each of the approaches provides input for VGG-like 5-layer CNN classifiers. We also examine adaptation per nonword. The evaluation of the proposed systems was performed using recordings of spoken nonwords from different kindergartens. ECAPA-TDNN and low-level FFT features do not explicitly model phonetic information; wav2vec 2.0 is trained on grapheme labels, and our ASR acoustic model features contain (sub-)phonetic information. We found that the more granular the phonetic modeling, the higher the achieved recognition rates. The best system, trained on ASR acoustic model features with VTLN, achieved an accuracy of 89.4\% and an area under the ROC (receiver operating characteristic) curve (AUC) of 0.923. This corresponds to a relative improvement of 20.2\% in accuracy and 0.309 in AUC compared to the FFT baseline.},
  language = {en}
}
@inproceedings{GeorgesHuangBocklet2020,
  author = {Georges, Munir and Huang, Jonathan and Bocklet, Tobias},
  title = {Compact Speaker Embedding},
  doi = {10.48550/arXiv.2008.05011},
  pages = {5},
  year = {2020},
  abstract = {Deep neural networks (DNN) have recently been widely used in speaker recognition systems, achieving state-of-the-art performance on various benchmarks. The x-vector architecture is especially popular in this research community due to its excellent performance and manageable computational complexity. In this paper, we present the lrx-vector system, which is the low-rank factorized version of the x-vector embedding network. The primary objective of this topology is to further reduce the memory requirements of the speaker recognition system. We discuss the deployment of knowledge distillation for training the lrx-vector system and compare against low-rank factorization with SVD. On the VOiCES 2019 far-field corpus we were able to reduce the weights by 28\% compared to the full-rank x-vector system while keeping the recognition rate constant (1.83\% EER).},
  language = {en}
}

@inproceedings{BayerlWagnerNoethetal.2022,
  author = {Bayerl, Sebastian P. and Wagner, Dominik and N{\"o}th, Elmar and Bocklet, Tobias and Riedhammer, Korbinian},
  editor = {Sojka, Petr and Kopeček, Ivan and Pala, Karel and Hor{\'a}k, Aleš},
  title = {The Influence of Dataset Partitioning on Dysfluency Detection Systems},
  publisher = {Springer International Publishing},
  doi = {10.48550/arXiv.2206.03400},
  pages = {14},
  year = {2022},
  abstract = {This paper empirically investigates the influence of different data splits and splitting strategies on the performance of dysfluency detection systems. For this, we perform experiments using wav2vec 2.0 models with a classification head as well as support vector machines (SVM) in conjunction with the features extracted from the wav2vec 2.0 model to detect dysfluencies. We train and evaluate the systems with different non-speaker-exclusive and speaker-exclusive splits of the Stuttering Events in Podcasts (SEP-28k) dataset to shed some light on the variability of results w.r.t. the partition method used. Furthermore, we show that the SEP-28k dataset is dominated by only a few speakers, making it difficult to evaluate. To remedy this problem, we created SEP-28k-Extended (SEP-28k-E), containing semi-automatically generated speaker and gender information for the SEP-28k corpus, and suggest different data splits, each useful for evaluating other aspects of methods for dysfluency detection.},
  language = {en}
}
@inproceedings{LopatkaBocklet2020,
  author = {Lopatka, Kuba and Bocklet, Tobias},
  title = {State Sequence Pooling Training of Acoustic Models for Keyword Spotting},
  series = {Proceedings Interspeech 2020},
  booktitle = {Proceedings Interspeech 2020},
  issn = {2958-1796},
  doi = {10.21437/Interspeech.2020-2722},
  pages = {4338 -- 4342},
  year = {2020},
  abstract = {We propose a new training method to improve HMM-based keyword spotting. The loss function is based on a score computed with the keyword/filler model from the entire input sequence. It is equivalent to max/attention pooling but is based on prior acoustic knowledge. We also employ a multi-task learning setup by predicting both LVCSR and keyword posteriors. We compare our model to a baseline trained on frame-wise cross-entropy, with and without per-class weighting. We employ a low-footprint TDNN for acoustic modeling. The proposed training yields significant and consistent improvement over the baseline in adverse noise conditions. The FRR on cafeteria noise is reduced from 13.07\% to 5.28\% at 9 dB SNR and from 37.44\% to 6.78\% at 5 dB SNR. We obtain these results with only 600 unique training keyword samples. The training method is independent of the frontend and acoustic model topology.},
  language = {en}
}

@inproceedings{SimicBocklet2024,
  author = {Simic, Christopher and Bocklet, Tobias},
  title = {Self-Supervised Adaptive AV Fusion Module for Pre-Trained ASR Models},
  series = {ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  booktitle = {ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  doi = {10.1109/ICASSP48485.2024.10448047},
  pages = {12787 -- 12791},
  year = {2024},
  language = {en}
}