@inproceedings{GeislingerMildeBaumannetal.,
  author    = {Geislinger, Robert and Milde, Benjamin and Baumann, Timo and Biemann, Chris},
  title     = {Live Subtitling for BigBlueButton with Open-Source Software},
  series    = {INTERSPEECH 2021 (Show \& Tell Contribution), 30 August -- 3 September 2021, Brno, Czechia},
  booktitle = {INTERSPEECH 2021 (Show \& Tell Contribution), 30 August -- 3 September 2021, Brno, Czechia},
  address   = {Brno, Czechia},
  year      = {2021},
  pages     = {3319--3320},
  abstract  = {We present an open-source plugin for live subtitling in the popular open-source video conferencing software BigBlueButton. Our plugin decodes each speaker's audio stream separately and in parallel, thereby obviating the need for speaker diarization and seamlessly handling overlapped talk. Any Kaldi-compatible nnet3 model can be used with our plugin, and we demonstrate it using freely available TDNN-HMM-based ASR models for English and German. Our subtitles can be used as they are (e.g., in loud environments) or can form the basis for further NLP processing. Our tool can also simplify the collection of remotely recorded multi-party dialogue corpora.},
  language  = {en}
}

@inproceedings{MildeGeislingerLindtetal.,
  author    = {Milde, Benjamin and Geislinger, Robert and Lindt, Irina and Baumann, Timo},
  title     = {Open Source Automatic Lecture Subtitling},
  series    = {Proceedings of Elektronische Sprachsignalverarbeitung (ESSV): Tagungsband der 32. Konferenz, Berlin, 3.-5. M{\"a}rz 2021},
  booktitle = {Proceedings of Elektronische Sprachsignalverarbeitung (ESSV): Tagungsband der 32. Konferenz, Berlin, 3.-5. M{\"a}rz 2021},
  publisher = {F{\"o}rderverein Elektronische Sprachsignalverarbeitung e.V.},
  address   = {Berlin, Germany},
  year      = {2021},
  url       = {http://nbn-resolving.de/urn:nbn:de:gbv:18-228-7-2523},
  pages     = {128--135},
  abstract  = {We present a fully automatic solution for German video subtitling, with a focus on lecture videos. We rely entirely on open-source models and scripts for German ASR, automatic punctuation reconstruction and subtitle segmentation. All training scripts, 1000h of German speech training data, pre-trained models and the final subtitling program are publicly available. It can readily be integrated into lecture video platforms such as Lecture2Go. The automatically generated subtitles can also serve as a basis for making the video material more accessible (e.g., via search, keyword clouds, and the like) or for further manual revision, potentially speeding up manual work significantly. A particular challenge we observe in lectures is technical terms that are frequent in a particular lecture but infrequent in a typical language model and that might be out of vocabulary for a general-purpose ASR system. We approach this challenge by extracting text from the accompanying lecture slides to adapt the language model of our TDNN-HMM-based ASR system. We demonstrate the usability of the full system and its generated subtitles and evaluate on a dataset of manually transcribed lectures, obtaining an average WER of 26.3\%.},
  language  = {en}
}