@inproceedings{BaumannWagnerRiedhammeretal.2025a,
  author    = {Baumann, Ilja and Wagner, Dominik and Riedhammer, Korbinian and Bocklet, Tobias},
  title     = {Pathology-Aware Speech Encoding and Data Augmentation for Dysarthric Speech Recognition},
  series    = {Interspeech 2025},
  booktitle = {Interspeech 2025},
  publisher = {ISCA},
  address   = {ISCA},
  issn      = {2958-1796},
  doi       = {10.21437/Interspeech.2025-2724},
  pages     = {3289--3293},
  year      = {2025},
  abstract  = {Automatic speech recognition (ASR) for pathologic speech remains a major challenge due to high variability in articulation, phonation, and prosody distortions. In this work, we propose a pathology-aware speech encoder based on BEST-RQ pre-training, which incorporates 46k hours of speech, including pathologic and atypical speech. We continue pre-training for domain adaptation and experiment with etiology-specific codebooks. We achieve a 13.2\% relative word error rate (WER) improvement using the pathology-aware speech encoder with etiology-specific continued pre-training. Additionally, we examine the impact of incorporating synthetic and out-of-domain (OOD) data to further enhance ASR performance. Synthetic data reduces WER by up to 8.7\%, while OOD data improves WER by 12.2\%. Finally, we introduce a semantic similarity-based data augmentation technique to optimize data selection, achieving a WER improvement of up to 9.7\% while minimizing the need for additional training data.},
  language  = {en}
}

@inproceedings{BaumannWagnerRiedhammeretal.2025b,
  author    = {Baumann, Ilja and Wagner, Dominik and Riedhammer, Korbinian and Bocklet, Tobias},
  title     = {Optimized Self-supervised Training with BEST-RQ for Speech Recognition},
  series    = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  publisher = {IEEE},
  doi       = {10.1109/ICASSP49660.2025.10889362},
  pages     = {1--5},
  year      = {2025},
  abstract  = {Self-supervised learning has been successfully used for various speech-related tasks, including automatic speech recognition. BERT-based Speech pre-Training with Random-projection Quantizer (BEST-RQ) has achieved state-of-the-art results in speech recognition. In this work, we further optimize the BEST-RQ approach using Kullback-Leibler divergence as an additional regularizing loss and a multi-codebook extension per cluster derived from low-level feature clustering. Preliminary experiments on the train-100 split of LibriSpeech result in a relative improvement of 11.2\% on test-clean by using multiple codebooks; utilizing a combination of cross-entropy and Kullback-Leibler divergence further reduces the word error rate by 4.5\%. The proposed optimizations on full LibriSpeech pre-training and fine-tuning result in relative word error rate improvements of up to 23.8\% on test-clean and 30.6\% on test-other using 6 codebooks. Furthermore, the proposed setup leads to faster convergence in pre-training and fine-tuning and additionally stabilizes the pre-training.},
  language  = {en}
}