@article{BaumannKoehnHennig,
  author    = {Baumann, Timo and K{\"o}hn, Arne and Hennig, Felix},
  title     = {The Spoken Wikipedia Corpus collection: Harvesting, alignment and an application to hyperlistening},
  journal   = {Language Resources and Evaluation},
  volume    = {53},
  number    = {2},
  publisher = {Springer Nature},
  doi       = {10.1007/s10579-017-9410-y},
  pages     = {303--329},
  abstract  = {Spoken corpora are important for speech research, but are expensive to create and do not necessarily reflect (read or spontaneous) speech 'in the wild'. We report on our conversion of the preexisting and freely available Spoken Wikipedia into a speech resource. The Spoken Wikipedia project unites volunteer readers of Wikipedia articles. There are initiatives to create and sustain Spoken Wikipedia versions in many languages, and hence the available data grows over time. Thousands of spoken articles are available to users who prefer a spoken over the written version. We turn these semi-structured collections into structured and time-aligned corpora, keeping the exact correspondence with the original hypertext as well as all available metadata. Thus, we make the Spoken Wikipedia accessible for sustainable research. We present our open-source software pipeline that downloads, extracts, normalizes and text-speech aligns the Spoken Wikipedia. Additional language versions can be exploited by adapting configuration files or extending the software if necessary for language peculiarities. We also present and analyze the resulting corpora for German, English, and Dutch, which presently total 1005 h and grow at an estimated 87 h per year. The corpora, together with our software, are available via http://islrn.org/resources/684-927-624-257-3/. As a prototype usage of the time-aligned corpus, we describe an experiment about the preferred modalities for interacting with information-rich read-out hypertext. We find alignments to help improve user experience and factual information access by enabling targeted interaction.},
  language  = {en}
}

@inproceedings{KoehnBaumann,
  author    = {K{\"o}hn, Arne and Baumann, Timo},
  title     = {Predictive Incremental Parsing Helps Language Modeling},
  booktitle = {Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  year      = {2016},
  pages     = {268--277},
  abstract  = {Predictive incremental parsing produces syntactic representations of sentences as they are produced, e.g. by typing or speaking. In order to generate connected parses for such unfinished sentences, upcoming word types can be hypothesized and structurally integrated with already realized words. For example, the presence of a determiner as the last word of a sentence prefix may indicate that a noun will appear somewhere in the completion of that sentence, and the determiner can be attached to the predicted noun. We combine the forward-looking parser predictions with backward-looking N-gram histories and analyze in a set of experiments the impact on language models, i.e. stronger discriminative power but also higher data sparsity. Conditioning N-gram models, MaxEnt models or RNN-LMs on parser predictions yields perplexity reductions of about 6\%.
Our method (a) retains online decoding capabilities and (b) incurs relatively little computational overhead, which sets it apart from previous approaches that use syntax for language modeling. Our method is particularly attractive for modular systems that make use of a syntax parser anyway, e.g. as part of an understanding pipeline, where predictive parsing improves language modeling at no additional cost.},
  language  = {en}
}

@inproceedings{KoehnStegenBaumann,
  author    = {K{\"o}hn, Arne and Stegen, Florian and Baumann, Timo},
  title     = {Mining the Spoken Wikipedia for Speech Data and Beyond},
  booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)},
  publisher = {European Language Resources Association (ELRA)},
  year      = {2016},
  pages     = {4644--4647},
  abstract  = {We present a corpus of time-aligned spoken data of Wikipedia articles as well as the pipeline that allows us to generate such corpora for many languages. There are initiatives to create and sustain spoken Wikipedia versions in many languages, and hence the data is freely available, grows over time, and can be used for automatic corpus creation. Our pipeline automatically downloads and aligns this data. The resulting German corpus currently totals 293 h of audio, of which we align 71 h in full sentences and another 86 h of sentences with some missing words. The English corpus consists of 287 h, for which we align 27 h in full sentences and 157 h with some missing words. Results are publicly available.},
  language  = {en}
}

@inproceedings{KollerBaumannKoehn,
  author    = {Koller, Alexander and Baumann, Timo and K{\"o}hn, Arne},
  title     = {DialogOS: Simple and extensible dialog modeling},
  booktitle = {Proceedings of Interspeech 2018},
  address   = {Hyderabad, India},
  year      = {2018},
  abstract  = {We present the open-source extensible dialog manager DialogOS. DialogOS features simple finite-state based dialog management (which can be expanded to more complex DM strategies via a full-fledged scripting language) in combination with integrated speech recognition and synthesis in multiple languages. DialogOS runs on all major platforms, provides a simple-to-use graphical interface, and can easily be extended via well-defined plugin and client interfaces, or can be integrated server-side into larger existing software infrastructures. We hope that DialogOS will help foster research and teaching, given that it lowers the barrier to entry for building and testing spoken dialog systems and provides paths to extend one's system as development progresses.},
  language  = {en}
}

@inproceedings{KoehnBaumannDoerfler,
  author    = {K{\"o}hn, Arne and Baumann, Timo and D{\"o}rfler, Oskar},
  title     = {An Empirical Analysis of the Correlation of Syntax and Prosody},
  booktitle = {Proceedings of Interspeech 2018},
  address   = {Hyderabad, India},
  year      = {2018},
  doi       = {10.21437/Interspeech.2018-2530},
  pages     = {2157--2161},
  abstract  = {The relation of syntax and prosody (the syntax-prosody interface) has been an active area of research, mostly in linguistics and typically studied under controlled conditions. More recently, prosody has also been successfully used in the data-based training of syntax parsers.
However, there is a gap between the controlled and detailed study of the individual effects between syntax and prosody and the large-scale application of prosody in syntactic parsing with only a shallow analysis of the respective influences. In this paper, we close this gap by investigating the significance of correlations of prosodic realization with specific syntactic functions, using linear mixed effects models on a very large corpus of read-out German encyclopedic texts. Using this corpus, we are able to analyze the prosodic structuring performed by a diverse set of speakers while they try to optimize factual content delivery. After normalization by speaker, we obtain significant effects, e.g. confirming that the subject function, as compared to the object function, has a positive effect on the pitch and duration of a word, but a negative effect on loudness.},
  language  = {en}
}