@inproceedings{TammewarBraunRoccabrunaetal.2022,
  author    = {Tammewar, Aniruddha and Braun, Franziska and Roccabruna, Gabriel and Bayerl, Sebastian P. and Riedhammer, Korbinian and Riccardi, Giuseppe},
  title     = {Annotation of Valence for Spoken Personal Narratives},
  pages     = {10},
  year      = {2022},
  abstract  = {A Personal Narrative (PN) is the recollection of an individual's life experiences, events, and thoughts, along with the associated emotions, in the form of a story. Compared to other genres such as social media texts or microblogs, where people write about experienced events or products, spoken PNs are complex to analyze and understand. They are usually long and unstructured, involving multiple related events and characters, as well as the thoughts and emotions associated with events, objects, and persons. In spoken PNs, emotions are conveyed through both the characteristics of the speech signal and the lexical content of the narrative. In this work, we annotate a corpus of spoken personal narratives with emotion valence using discrete values. The PNs are segmented into speech segments, and the annotators label them in the discourse context with values on a 5-point bipolar scale ranging from -2 to +2 (0 for neutral). In this way, we capture the unfolding of the PNs' events and the changes in the emotional state of the narrator. We perform an in-depth analysis of the inter-annotator agreement and of the relation between the label distribution and the stimulus (positive/negative) used for the elicitation of the narrative, and we compare the segment-level annotations to a baseline continuous annotation. We find that the neutral score plays an important role in the agreement. We observe that positive and negative valence are easy to differentiate, while confusion with the neutral label is high.},
  language  = {en}
}

@inproceedings{BundschererSchmittBayerletal.2022,
  author    = {Bundscherer, Maximilian and Schmitt, Thomas H. and Bayerl, Sebastian P. and Auerbach, Thomas and Bocklet, Tobias},
  title     = {An Acoustical Machine Learning Approach to Determine Abrasive Belt Wear of Wide Belt Sanders},
  series    = {2022 IEEE Sensors},
  volume    = {2022},
  booktitle = {2022 IEEE Sensors},
  publisher = {IEEE},
  isbn      = {978-1-6654-8464-0},
  doi       = {10.1109/SENSORS52175.2022.9967324},
  pages     = {4},
  year      = {2022},
  abstract  = {This paper describes a machine learning approach to determine the abrasive belt wear of wide belt sanders used in industrial processes based on acoustic data, regardless of the sanding process-related parameters feed speed, grit size, and type of material. Our approach uses decision tree, random forest, k-nearest neighbors, and neural network classifiers to detect belt wear from spectrograms, Mel spectrograms, MFCCs, IMFCCs, and LFCCs, yielding an accuracy of up to 86.1\% on five levels of belt wear. An accuracy of 96\% could be achieved with decision tree classifiers specialized for different sanding parameter configurations. The classifiers could also determine with 97\% accuracy whether the machine is currently sanding or idle, and detect the sanding parameters feed speed and grit size with accuracies of 98.4\% and 98.8\%, respectively. We show that low-dimensional mappings of high-dimensional features can be used to meaningfully visualize belt wear and sanding parameters.},
  language  = {en}
}

@inproceedings{BaumannWagnerBayerletal.2022,
  author    = {Baumann, Ilja and Wagner, Dominik and Bayerl, Sebastian P.
               and Bocklet, Tobias},
  title     = {Nonwords Pronunciation Classification in Language Development Tests for Preschool Children},
  series    = {Interspeech 2022},
  volume    = {2022},
  booktitle = {Interspeech 2022},
  publisher = {ISCA},
  issn      = {2958-1796},
  doi       = {10.21437/interspeech.2022-10777},
  pages     = {3643 -- 3647},
  year      = {2022},
  abstract  = {This work aims to automatically evaluate whether the language development of children is age-appropriate. Validated speech and language tests are used for this purpose to test auditory memory. In this work, the task is to determine whether spoken nonwords have been uttered correctly. We compare approaches motivated by the modeling of specific language structures: low-level features (FFT), speaker embeddings (ECAPA-TDNN), grapheme-motivated embeddings (wav2vec 2.0), and phonetic embeddings in the form of senones (ASR acoustic model). Each of these approaches provides input to VGG-like 5-layer CNN classifiers. We also examine per-nonword adaptation. The proposed systems were evaluated on recordings of spoken nonwords collected in different kindergartens. ECAPA-TDNN and low-level FFT features do not explicitly model phonetic information; wav2vec 2.0 is trained on grapheme labels, while our ASR acoustic model features contain (sub-)phonetic information. We found that the more granular the phonetic modeling, the higher the achieved recognition rates. The best system, trained on ASR acoustic model features with VTLN, achieved an accuracy of 89.4\% and an area under the ROC (Receiver Operating Characteristic) curve (AUC) of 0.923. This corresponds to a relative improvement of 20.2\% in accuracy and 0.309 in AUC compared to the FFT baseline.},
  language  = {en}
}

@article{BayerlWagnerBaumannetal.2023,
  author    = {Bayerl, Sebastian P. and Wagner, Dominik and Baumann, Ilja and Bocklet, Tobias and Riedhammer, Korbinian},
  title     = {Detecting Vocal Fatigue with Neural Embeddings},
  journal   = {Journal of Voice},
  publisher = {Elsevier BV},
  issn      = {0892-1997},
  doi       = {10.1016/j.jvoice.2023.01.012},
  pages     = {11},
  year      = {2023},
  abstract  = {Vocal fatigue refers to the feeling of tiredness and weakness of the voice due to extended use. This paper investigates the effectiveness of neural embeddings for the detection of vocal fatigue. We compare x-vector, ECAPA-TDNN, and wav2vec 2.0 embeddings on a corpus of academic spoken English. Low-dimensional mappings of the data reveal that neural embeddings capture information about the change in a speaker's vocal characteristics during prolonged voice use. We show that vocal fatigue can be reliably predicted using all three types of neural embeddings after 40 minutes of continuous speaking when temporal smoothing and normalization are applied to the extracted embeddings. We employ support vector machines for classification and achieve accuracy scores of 81\% using x-vectors, 85\% using ECAPA-TDNN embeddings, and 82\% using wav2vec 2.0 embeddings as input features. We obtain an accuracy score of 76\% when the trained system is applied to a different speaker and recording environment without any adaptation.},
  language  = {en}
}

@incollection{BayerlWagnerNoethetal.2022,
  author    = {Bayerl, Sebastian P.
               and Wagner, Dominik and N{\"o}th, Elmar and Bocklet, Tobias and Riedhammer, Korbinian},
  title     = {The Influence of Dataset Partitioning on Dysfluency Detection Systems},
  series    = {Text, Speech, and Dialogue},
  booktitle = {Text, Speech, and Dialogue},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {9783031162695},
  issn      = {0302-9743},
  doi       = {10.1007/978-3-031-16270-1_35},
  pages     = {423 -- 436},
  year      = {2022},
  abstract  = {This paper empirically investigates the influence of different data splits and splitting strategies on the performance of dysfluency detection systems. For this, we perform experiments using wav2vec 2.0 models with a classification head, as well as support vector machines (SVMs) in conjunction with features extracted from the wav2vec 2.0 model, to detect dysfluencies. We train and evaluate the systems with different non-speaker-exclusive and speaker-exclusive splits of the Stuttering Events in Podcasts (SEP-28k) dataset to shed light on the variability of results w.r.t. the partitioning method used. Furthermore, we show that the SEP-28k dataset is dominated by only a few speakers, making it difficult to evaluate. To remedy this problem, we created SEP-28k-Extended (SEP-28k-E), containing semi-automatically generated speaker and gender information for the SEP-28k corpus, and suggest different data splits, each useful for evaluating different aspects of dysfluency detection methods.},
  language  = {en}
}

@inproceedings{WagnerBayerlMarurietal.2023,
  author    = {Wagner, Dominik and Bayerl, Sebastian P. and Maruri, Hector A. Cordourier and Bocklet, Tobias},
  title     = {Generative Models for Improved Naturalness, Intelligibility, and Voicing of Whispered Speech},
  series    = {2022 IEEE Spoken Language Technology Workshop (SLT)},
  booktitle = {2022 IEEE Spoken Language Technology Workshop (SLT)},
  publisher = {IEEE},
  isbn      = {979-8-3503-9690-4},
  doi       = {10.1109/SLT54892.2023.10022796},
  pages     = {943 -- 948},
  year      = {2023},
  abstract  = {This work adapts two recent generative model architectures and evaluates their effectiveness for the conversion of whispered speech to normal speech. We incorporate the normal target speech into the training criterion of vector-quantized variational autoencoders (VQ-VAEs) and MelGANs, thereby conditioning the systems to recover voiced speech from whispered inputs. Objective and subjective quality measures indicate that both VQ-VAEs and MelGANs can be modified to perform the conversion task. We find that the proposed approaches significantly improve the Mel cepstral distortion (MCD) metric by at least 25\% relative to a Disco-GAN baseline. Subjective listening tests suggest that the MelGAN-based system significantly improves naturalness, intelligibility, and voicing compared to the whispered input speech. A novel evaluation measure based on differences between latent speech representations also indicates that our MelGAN-based approach yields improvements relative to the baseline.},
  language  = {en}
}

@inproceedings{WagnerBayerlBaumannetal.2024,
  author    = {Wagner, Dominik and Bayerl, Sebastian P.
               and Baumann, Ilja and Riedhammer, Korbinian and N{\"o}th, Elmar and Bocklet, Tobias},
  title     = {Large Language Models for Dysfluency Detection in Stuttered Speech},
  doi       = {10.48550/arXiv.2406.11025},
  pages     = {6},
  year      = {2024},
  abstract  = {Accurately detecting dysfluencies in spoken language can help to improve the performance of automatic speech and language processing components and support the development of more inclusive speech and language technologies. Inspired by the recent trend towards the deployment of large language models (LLMs) as universal learners and processors of non-lexical inputs, such as audio and video, we approach the task of multi-label dysfluency detection as a language modeling problem. We present hypothesis candidates generated with an automatic speech recognition system and acoustic representations extracted from an audio encoder model to an LLM, and fine-tune the system to predict dysfluency labels on three datasets containing English and German stuttered speech. The experimental results show that our system effectively combines acoustic and lexical information and achieves competitive results on the multi-label stuttering detection task.},
  language  = {en}
}