@inproceedings{NayakBaumannBhattacharyaetal.,
  author    = {Nayak, Shravan and Baumann, Timo and Bhattacharya, Supratik and Karakanta, Alina and Negri, Matteo and Turchi, Marco},
  title     = {See me Speaking? Differentiating on Whether Words are Spoken On Screen or Off to Optimize Machine Dubbing},
  series    = {ICMI '20 Companion: Companion Publication of the 2020 International Conference on Multimodal Interaction, 25.10.2020 - 29.10.2020, Virtual Event, Netherlands},
  booktitle = {ICMI '20 Companion: Companion Publication of the 2020 International Conference on Multimodal Interaction, 25.10.2020 - 29.10.2020, Virtual Event, Netherlands},
  editor    = {Truong, Khiet},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, United States},
  isbn      = {9781450380027},
  doi       = {10.1145/3395035.3425640},
  pages     = {130--134},
  abstract  = {Dubbing is the art of finding a translation from a source into a target language that can be lip-synchronously revoiced, i.e., that makes the target-language speech appear as if it had been spoken by the very actors all along. Lip synchrony is essential for the full-fledged reception of foreign audiovisual media, such as movies and series, as violated constraints of synchrony between video (lips) and audio (speech) lead to cognitive dissonance and reduce the perceptual quality. Of course, synchrony constraints only apply to the translation when the speaker's lips are visible on screen. Deciding whether to apply synchrony constraints therefore requires an automatic method for detecting whether an actor's lips are visible on screen for a given stretch of speech. In this paper, we attempt, for the first time, to classify on- from off-screen speech based on a corpus of real-world television material that has been annotated word-by-word for the visibility of talking lips on screen. We present classification experiments in which we classify},
  language  = {en}
}

@inproceedings{KarakantaBhattacharyaNayaketal.,
  author    = {Karakanta, Alina and Bhattacharya, Supratik and Nayak, Shravan and Baumann, Timo and Negri, Matteo and Turchi, Marco},
  title     = {The Two Shades of Dubbing in Neural Machine Translation},
  series    = {Proceedings of the 28th International Conference on Computational Linguistics (COLING 2020): Barcelona, Spain (Online)},
  booktitle = {Proceedings of the 28th International Conference on Computational Linguistics (COLING 2020): Barcelona, Spain (Online)},
  editor    = {Scott, Donia and Bel, Nuria and Zong, Chengqing},
  publisher = {International Committee on Computational Linguistics},
  address   = {Stroudsburg, PA, USA},
  doi       = {10.18653/v1/2020.coling-main.382},
  pages     = {4327--4333},
  abstract  = {Dubbing has two shades: synchronisation constraints are applied only when the actor's mouth is visible on screen, while the translation is unconstrained for off-screen dubbing. Consequently, different synchronisation requirements, and therefore different translation strategies, are applied depending on the type of dubbing. In this work, we manually annotate an existing dubbing corpus (Heroes) for this dichotomy. We show that, even though we did not observe distinctive features between on- and off-screen dubbing at the textual level, on-screen dubbing is more difficult for MT (-4 BLEU points). Moreover, synchronisation constraints dramatically decrease translation quality for off-screen dubbing. We conclude that distinguishing between on-screen and off-screen dubbing is necessary for determining successful strategies for dubbing-customised Machine Translation.},
  language  = {en}
}