@inproceedings{NayakSchulerSahaetal.,
  author    = {Nayak, Shravan and Schuler, Christian and Saha, Debjoy and Baumann, Timo},
  title     = {A Deep Dive Into Neural Synchrony Evaluation for Audio-visual Translation},
  booktitle = {ICMI '22: Proceedings of the 2022 International Conference on Multimodal Interaction, November 7--11, 2022, Bengaluru, India},
  editor    = {Tumuluri, Raj and Sebe, Nicu and Pingali, Gopal},
  publisher = {Association for Computing Machinery},
  address   = {New York},
  year      = {2022},
  isbn      = {978-1-4503-9390-4},
  doi       = {10.1145/3536221.3556621},
  pages     = {642--647},
  abstract  = {We present a comprehensive analysis of the neural audio-visual synchrony evaluation tool SyncNet. We assess how well SyncNet scores agree with human perception and whether they can serve as a reliable metric for evaluating audio-visual lip-synchrony in generation tasks with no ground-truth reference audio-video pair. We further examine the underlying elements in audio and video that critically affect synchrony, using interpretable explanations of SyncNet predictions, and analyse its susceptibility to adversarial noise. SyncNet has been used in numerous papers on visually-grounded text-to-speech for scenarios such as dubbing. We focus on this scenario, which features many local asynchronies (something SyncNet was not designed for).},
  language  = {en}
}

@inproceedings{SchulerNayakSahaetal.,
  author    = {Schuler, Christian and Nayak, Shravan and Saha, Debjoy and Baumann, Timo},
  title     = {Can We See Your Response Before You Speak? Exploring Linguistic Information Found in Inter-Utterance Pauses},
  booktitle = {Elektronische Sprachsignalverarbeitung 2024, Tagungsband der 35. Konferenz, Regensburg, 6.--8. M{\"a}rz 2024},
  editor    = {Baumann, Timo},
  publisher = {TUDpress},
  address   = {Dresden},
  year      = {2024},
  isbn      = {978-3-95908-325-6},
  doi       = {10.35096/othr/pub-7094},
  url       = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70949},
  pages     = {165--172},
  abstract  = {In this work we assess whether pauses between utterances of the same or different speakers carry information that is predictive of the following speaker's utterance. We present models that connect a person's visual features before they speak to their upcoming utterance. In our experiments we find that out-of-the-box pre-trained models already reach better-than-chance performance in correlating video embeddings with utterance embeddings. In contrast, models that attempt to predict the first word after the pause do not outperform a unigram model, indicating that our models do not read lips (based e.g. on co-articulation effects) but rather capture more fundamental aspects of the upcoming utterance.},
  language  = {en}
}