@inproceedings{SchulerNayakSahaetal.,
  author = {Schuler, Christian and Nayak, Shravan and Saha, Debjoy and Baumann, Timo},
  title = {Can We See Your Response Before You Speak? Exploring Linguistic Information Found in Inter-Utterance Pauses},
  series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  editor = {Baumann, Timo},
  publisher = {TUDpress},
  address = {Dresden},
  isbn = {978-3-95908-325-6},
  doi = {10.35096/othr/pub-7094},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70949},
  pages = {165 -- 172},
  abstract = {In this work we assess whether there is information in pauses in-between utterances of the same or different speakers that are predictive of the following speaker's utterance. We present models that connect a person's visual features before they speak to their upcoming utterance. In our experiments we find that out-of-the-box pre-trained models can already reach a better-than-chance performance in correlating video embeddings to utterance embeddings. In contrast, models that attempt to predict the first word after the pause do not outperform a unigram model, indicating that our models do not read lips (based e.g. on co-articulation effects) but rather capture more fundamental aspects of the upcoming utterance.},
  language = {en}
}

@inproceedings{SchubertSinhaKruegeretal.,
  author = {Schubert, Martha and Sinha, Yamini and Kr{\"u}ger, Julia and Siegert, Ingo},
  title = {Speech Recognition Errors in ASR Engines and Their Impact on Linguistic Analysis in Psychotherapies},
  series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  editor = {Baumann, Timo},
  publisher = {TUDpress},
  address = {Dresden},
  isbn = {978-3-95908-325-6},
  doi = {10.35096/othr/pub-7099},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70999},
  pages = {203 -- 210},
  abstract = {Modern intervention planning in psychotherapies may benefit from predicting process-relevant psychotherapy constructs by automated speech analysis. One essential step is the extraction of relevant linguistic speech markers by ASR engines which, because of the highly sensitive data, work offline. We analyze transcription errors from NeMo, Whisper, and Wav2Vec2.0, focusing on their impact on linguistic markers that usually require high-quality transcripts. By utilizing part-of-speech tagging, we examine error occurrences among different word types. The Linguistic Inquiry and Word Count (LIWC) software aids in extracting markers. We highlight challenges in transcribing spontaneous speech, prevalent in therapy, and compare results with the Mozilla CommonVoice dataset, which features read speech.},
  language = {en}
}

@inproceedings{HarnischHillmann,
  author = {Harnisch, Philipp L. and Hillmann, Stefan},
  title = {Empirical Evaluation of ASR and NLU in a Multimodal Dialogue System for Survey Answering},
  series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
M{\"a}rz 2024}, editor = {Baumann, Timo}, publisher = {TUDpress}, address = {Dresden}, isbn = {978-3-95908-325-6}, doi = {10.35096/othr/pub-7100}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-71007}, pages = {211 -- 218}, abstract = {PROM surveys, used to measure the effect of rehabilitation treatments, are typically filled out on paper, and often suffer from low response rates. Replacing it with a multimodal survey system, supporting touch and speech interaction, could lead to lower hurdles and therefore more data quantity. To do this, it requires task-specific training samples for the Automatic Speech Recognition (ASR) and Natural Language Understanding (NLU) to classify spoken answers into one of the standardized PROM answer options. Due to the lack of training data for medical PROM surveys, we created augmented text samples with each answer option description, combined with different templates. To improve training capabilities, introduce a proper test set, and evaluate the ASR, we also collected 1,797 real voice samples within an empirical study. Further, we incorporate the contextual knowledge of the current question into our NLU architecture by implementing one classifier for every question scale. Our results reveal that training with empirical data leads to better results than augmented data from templates and original answer option descriptions. Because of participant mislabeling of 33\% due to the ambiguity of the task, we receive overall low NLU performances with up to 51.1\% accuracy, and rank-1-accuracy up to 79.3\%. We also find that our implementation of many scale-specific NLU classifiers significantly outperforms one NLU classifier for all labels, that incorporates the same contextual knowledge after the prediction, by 8 percent points.}, language = {en} } @inproceedings{VenkateswaranAlFoysalShaiketal., author = {Venkateswaran, Siddarth and Al Foysal, Abdullah and Shaik, Nazeer Basha and B{\"o}ck, Ronald}, title = {Is there Text in Wine? - S+U Learning-based Named Entity Recognition and Triplet Extraction from Wine Aroma Descriptors}, series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024}, booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024}, editor = {Baumann, Timo}, publisher = {TUDpress}, address = {Dresden}, isbn = {978-3-95908-325-6}, doi = {10.35096/othr/pub-7093}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70931}, pages = {157 -- 164}, abstract = {Wine making is usually considered a domain being far off the processing of speech and language. But in a particular aspect, the domains of speech processing and wine making are related, namely, in the description of wine aromas. These descriptors are used for creating wine expertise as well as more general (advertisement-like) textual representations. In the current paper, we use Natural Language Processing techniques, especially Named Entity Recognition, to identify Aspects and Opinions, reflecting wine characteristics. These are combined with analyses of respective relations (triplet extraction) building Aspect-Opinion-Pairs to establish indicative aroma descriptors, also trying to approach the complex interplay amongst these individual statements. In our experiments, we rely on the Falstaff corpus comprising a huge set of wine descriptions. This results in an average F1 score of around 0.85 for Aspect-Opinion classification. 
  For triplet generation multiple strategies were compared, resulting in an average F1 score of 0.67 in this challenging task. For both tasks we rely only on a handful of manually annotated samples, applying pseudo-labeling methods from seed data to achieve automatic labeling.},
  language = {en}
}

@inproceedings{BauerZalkowMuelleretal.,
  author = {Bauer, Judith and Zalkow, Frank and M{\"u}ller, Meinard and Dittmar, Christian},
  title = {Evaluating the Impact of Prosody Feature Normalization on the Controllability of Pitch in Speech Synthesis},
  series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  editor = {Baumann, Timo},
  publisher = {TUDpress},
  address = {Dresden},
  isbn = {978-3-95908-325-6},
  doi = {10.35096/othr/pub-7097},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70976},
  pages = {188 -- 195},
  abstract = {Recent neural text-to-speech (TTS) models are able to synthesize highly natural speech signals using deep learning techniques. In practical applications, it can be desirable to have explicit control over the prosody (speech rate, fundamental frequency, and energy) of the synthesized speech. Such controllability can be achieved by adding prosody prediction modules, whose main purpose is to estimate plausible prosody features for each phoneme in the text input. This explicit modeling also allows for changing prosody features at inference time, consequently enabling the adjustment of the prosody in the synthesized audio. In this paper, we evaluate to which extent deliberate manipulation of such prosody features is reflected in the resulting speech audio. We focus particularly on changing the pitch (i.e., fundamental frequency) while applying different normalization strategies.},
  language = {en}
}

@inproceedings{SinhaHintzSiegert,
  author = {Sinha, Yamini and Hintz, Jan and Siegert, Ingo},
  title = {Evaluation of Audio Deepfakes - Systematic Review},
  series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  editor = {Baumann, Timo},
  publisher = {TUDpress},
  address = {Dresden},
  isbn = {978-3-95908-325-6},
  doi = {10.35096/othr/pub-7096},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70960},
  pages = {181 -- 187},
  abstract = {Generative models for audio are commonly used for music composition, sound effects generation for video game development, audio restoration, voice cloning, etc. The ease of generating indistinguishable fake audio with deep learning poses a major threat to personal privacy, online security, and political discourse. Evaluating the quality and realism of these synthetic utterances is crucial for mitigating the potential for misinformation and harm. To assess this threat, this paper conducts a systematic review, using Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA), on how these deepfake models are currently evaluated. The analysis of 86 papers shows that the majority of the evaluation is conducted on a machine level and highlights a research gap regarding the human perception of deepfakes.
  This paper explores various methods and perceptual measures employed in assessing audio deepfakes and evaluates their strengths, limitations, and future directions.},
  language = {en}
}

@inproceedings{Sering,
  author = {Sering, Konstantin},
  title = {Speech/Non-Speech Classification Slightly Improves Synthesis Quality in PAULE},
  series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  editor = {Baumann, Timo},
  publisher = {TUDpress},
  address = {Dresden},
  isbn = {978-3-95908-325-6},
  doi = {10.35096/othr/pub-7095},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70955},
  pages = {173 -- 180},
  abstract = {One of the tasks PAULE [1, 2] solves is finding suitable control parameter (cp-)trajectories for a given target acoustic. These cp-trajectories can be used to synthesize speech with the articulatory speech synthesizer of the VocalTractLab (VTL) [3]. If the target acoustic contains substantial microphone noise or other background noises, PAULE occasionally optimizes not for the speech in the target but for these background noises. By adding a speech/non-speech classifier to the feedback and planning loop in PAULE, this resynthesis of background noises should be mitigated. Unfortunately, the improvements were minor, which might be due to uninformative gradients of the classifier. The importance of informative gradients and the use of classifiers to adapt PAULE to different tasks are explained and discussed.},
  language = {en}
}

@inproceedings{MousaviGrawunder,
  author = {Mousavi, Neda and Grawunder, Sven},
  title = {The influence of signal segmentation methods on rhythm-based speaker recognition},
  series = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.-8. M{\"a}rz 2024},
  editor = {Baumann, Timo},
  publisher = {TUDpress},
  address = {Dresden},
  isbn = {978-3-95908-325-6},
  doi = {10.35096/othr/pub-7101},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-71014},
  pages = {225 -- 232},
  abstract = {This study investigates the effects of speech segmentation methods on speaker recognition models, particularly with regard to the use of rhythmic feature sets. Using three automatic methods and one manual method on the German Kiel corpus, segmentation was performed based on the identification of vowel onsets. Subsequently, rhythmic variability indices derived from these intervals were calculated and used for a principal component analysis and a support vector machine model in order to investigate the variation among speakers.
  The results underline the influence of signal segmentation methods on speaker recognition models.},
  language = {en}
}

@inproceedings{WiesmaierLippertKaratsiolisetal.,
  author = {Wiesmaier, Alexander and Lippert, Markus and Karatsiolis, Vangelis and Raptis, Georgios and Buchmann, Johannes},
  title = {An Evaluated Certification Services System for the German National Root CA - Legally Binding and Trustworthy Transactions in E-Business and E-Government},
  series = {Proceedings of The 2005 International Conference on E-Business, Enterprise Information Systems, E-Government, and Outsourcing, EEE 2005, Las Vegas, Nevada, USA, June 20-23, 2005},
  booktitle = {Proceedings of The 2005 International Conference on E-Business, Enterprise Information Systems, E-Government, and Outsourcing, EEE 2005, Las Vegas, Nevada, USA, June 20-23, 2005},
  publisher = {CSREA Press},
  isbn = {1-932415-76-9},
  pages = {103 -- 108},
  language = {en}
}

@inproceedings{MeyerSickendiekHusseinBaumann,
  author = {Meyer-Sickendiek, Burkhard and Hussein, Hussein and Baumann, Timo},
  title = {Recognizing Modern Sound Poetry with LSTM Networks},
  series = {Proceedings of Elektronische Sprachsignalverarbeitung (ESSV)},
  booktitle = {Proceedings of Elektronische Sprachsignalverarbeitung (ESSV)},
  editor = {Berton, Andr{\'e} and Haiber, Udo and Minker, Wolfgang},
  publisher = {TUDpress},
  address = {Ulm, Germany},
  isbn = {978-3-959081-28-3},
  pages = {192 -- 199},
  abstract = {Our paper focuses on the computational analysis of "readout poetry" (German: H{\"o}rdichtung) - recordings of poets reading their own work - with regard to the most important type of this genre, modern "sound poetry" (German: Lautdichtung). Whereas "readout poetry" often uses normal words and sentences, "sound poetry", developed by dadaistic poets like Hugo Ball and Kurt Schwitters or concrete poets like Ernst Jandl, Oskar Pastior, or Bob Cobbing, combines the "microparticles of the human voice", like the segments in Ernst Jandl's sound poem "schtzngrmm" ("schtzngrmm / schtzngrmm / tttt / tttt / grrrmmmmm / tttt / sch / tzngrmm"). Within the genre of sound poetry, there are two main forms: the lettristic and the syllabic decomposition. A short anecdote explains this difference: The dadaist Raoul Hausmann developed lettristic sound poetry in his early dadaistic poem "fmsbw" from 1918. This is said to have inspired his successor Schwitters, whose famous "Ursonate" [The Sonata in Primal Speech] begins with the words "F{\"u}mms b{\"o} w{\"o} t{\"a}{\"a} z{\"a}{\"a} Uu". With the "Ursonate", Schwitters developed a syllabic variation of the lettristic poems of Hausmann. The paper shows how to train a bidirectional LSTM network in order to distinguish between these "dadaistic" sound poems and the "normal" readout poems. In a further step, we also show how to distinguish between the lettristic and the syllabic decomposition. Based on a bidirectional LSTM network that reads encodings of the character sequence in the poem and uses the output of each directional layer, we identify poems of the sound poetry genre and differentiate between its two types of composition. The classification of sound poetry vs. other poetry as well as of lettristic vs. syllabic decomposition achieves high performance, yielding F-scores of 0.86 and 0.84, respectively.},
  subject = {Sprachverarbeitung},
  language = {en}
}