@inproceedings{RiedhammerHaderleinSchusteretal.2006, author = {Riedhammer, Korbinian and Haderlein, Tino and Schuster, Maria and Rosanowski, Frank and N{\"o}th, Elmar}, title = {Automatic Evaluation of Tracheoesophageal Telephone Speech}, series = {First International Language Technologies Conference (IS-LTC 2006), Ljubljana, Slovenia, October 2006.}, booktitle = {First International Language Technologies Conference (IS-LTC 2006), Ljubljana, Slovenia, October 2006.}, pages = {17 -- 22}, year = {2006}, abstract = {The tracheoesophageal (TE) substitute voice is currently the state-of-the-art treatment to restore the ability to speak after laryngectomy. Intelligibility when talking over the telephone is an important clinical factor, as telephone use is a crucial part of the patients' social life. An objective way to rate the intelligibility of substitute voices over the telephone is therefore desirable to improve post-laryngectomy speech therapy. An automatic speech recognition (ASR) system was applied to 41 high-quality recordings of post-laryngectomy patients. The ASR system was trained on normal, non-pathologic speech. It yielded a word accuracy (WA) of 36.9\%±18.0\%; compared to the intelligibility ratings of a group of human experts, the ASR system showed a correlation coefficient of -.88. After downsampling the 41 recordings to telephone quality, the ASR system reached a WA of 26.4\%±13.9\%, leading to a correlation coefficient of -.80. These results confirm that an ASR system can be used for objective intelligibility rating over the telephone.}, language = {en} }
@inproceedings{RiedhammerHaderleinNoethetal.2006, author = {Riedhammer, Korbinian and Haderlein, Tino and N{\"o}th, Elmar and Toy, Hikmet and Eysholdt, Ulrich and Rosanowski, Frank}, title = {Die tracheo{\"o}sophageale Ersatzstimme: Automatische Verst{\"a}ndlichkeitsbewertung {\"u}ber das Telefon}, series = {23. Wissenschaftliche Jahrestagung der Deutschen Gesellschaft f{\"u}r Phoniatrie und P{\"a}daudiologie. Heidelberg, September 2006. Aktuelle phoniatrisch-p{\"a}daudiologische Aspekte.}, booktitle = {23. Wissenschaftliche Jahrestagung der Deutschen Gesellschaft f{\"u}r Phoniatrie und P{\"a}daudiologie. Heidelberg, September 2006. Aktuelle phoniatrisch-p{\"a}daudiologische Aspekte.}, pages = {51 -- 53}, year = {2006}, abstract = {The tracheoesophageal (TE) substitute voice is today the state of the art in voice rehabilitation after laryngectomy. This study, a subproject of a research programme funded by the Deutsche Krebshilfe, addressed the objective assessment of treatment progress. 41 laryngectomees with a TE voice (Provox voice prosthesis) were examined. The aim of the study was to objectively assess and compare intelligibility in conversation and over the telephone, so that patients can in the future be evaluated from home by telephone. An automatic speech recognition system developed to commercial maturity served as the assessment tool. First, close-talking recordings of the "Nordwind und Sonne" ("The North Wind and the Sun") text were rated for intelligibility by five experts. Simulated telephone recordings were then created from these recordings by playing them back over a telephone. The target criterion of the automatic analysis was the word accuracy (WA), which was correlated with the experts' voice ratings on a school-grade scale.
The study yielded a correlation of -0.82 for the close-talking recordings and -0.69 for the telephone recordings. The results show that automatic intelligibility assessment of substitute voices is, in principle, also possible over the telephone. Ways to compensate for the quality loss caused by the telephone transmission, and thus for the lower correlation, are outlined.}, language = {de} }
@inproceedings{RiedhammerGroppNoeth2012, author = {Riedhammer, Korbinian and Gropp, Martin and N{\"o}th, Elmar}, title = {The FAU Video Lecture Browser System}, series = {2012 IEEE Spoken Language Technology Workshop (SLT), Miami, FL, USA, December 2012.}, booktitle = {2012 IEEE Spoken Language Technology Workshop (SLT), Miami, FL, USA, December 2012.}, publisher = {IEEE}, pages = {392 -- 397}, year = {2012}, abstract = {A growing number of universities and other educational institutions provide recordings of lectures and seminars as an additional resource to the students. In contrast to educational films that are scripted, directed and often shot by film professionals, these plain recordings are typically not post-processed in an editorial sense. Thus, the videos often contain longer periods of inactivity or silence, unnecessary repetitions, or corrections of prior mistakes. This paper describes the FAU Video Lecture Browser system, a web-based platform for the interactive assessment of video lectures that helps to close the gap between a plain recording and a useful e-learning resource by displaying automatically extracted and ranked key phrases on an augmented time line based on stream graphs. In a pilot study, users of the interface were able to complete a topic localization task about 29 \% faster than users provided with the video only, while achieving about the same accuracy. The user interactions can be logged on the server to collect data to evaluate the quality of the phrases and rankings, and to train systems that produce customized phrase rankings.}, language = {en} }
@inproceedings{RiedhammerGroppNoeth2011, author = {Riedhammer, Korbinian and Gropp, Martin and N{\"o}th, Elmar}, title = {A Novel Lecture Browser Using Key Phrases and Stream Graphs}, series = {Lehrstuhl f{\"u}r Mustererkennung, Universit{\"a}t Erlangen-N{\"u}rnberg}, booktitle = {Lehrstuhl f{\"u}r Mustererkennung, Universit{\"a}t Erlangen-N{\"u}rnberg}, year = {2011}, abstract = {We present a novel lecture browser that utilizes ranked key phrases displayed on a stream graph to overcome the shortcomings of traditional extractive (query-based) summaries. The system extracts key phrases from the ASR transcripts, performs an unsupervised ranking, and displays an initial number of phrases on the stream graph. This graph gives an intuition of when each key phrase is spoken and how dominant it is throughout the lecture. The user can select the phrases to be displayed and furthermore adjust the ranking of all phrases.
All user interactions are logged to a server to improve the ranking algorithms and provide user-specific rankings.}, language = {en} }
@inproceedings{RiedhammerGroppBockletetal.2013, author = {Riedhammer, Korbinian and Gropp, Martin and Bocklet, Tobias and H{\"o}nig, Florian and N{\"o}th, Elmar and Steidl, Stefan}, title = {LMELectures: A Multimedia Corpus of Academic Spoken English}, series = {First Workshop on Speech, Language and Audio in Multimedia (SLAM 2013), Marseille, France, August 2013, ISCA Archive.}, booktitle = {First Workshop on Speech, Language and Audio in Multimedia (SLAM 2013), Marseille, France, August 2013, ISCA Archive.}, pages = {102 -- 107}, year = {2013}, abstract = {This paper describes the acquisition, transcription and annotation of a multimedia corpus of academic spoken English, the LMELectures. It consists of two lecture series that were given in the summer term 2009 at the computer science department of the University of Erlangen-Nuremberg, covering topics in pattern analysis, machine learning and interventional medical image processing. In total, about 40 hours of high-definition audio and video of a single speaker were acquired in a constant recording environment. In addition to the recordings, the presentation slides are available in machine-readable (PDF) format. The manual annotations include a suggested segmentation into speech turns and a complete manual transcription that was done using BLITZSCRIBE2, a new tool for rapid transcription. For one lecture series, the lecturer assigned key words to each recording; one recording of that series was further annotated with a list of ranked key phrases by each of five human annotators. The corpus is available for non-commercial purposes upon request.}, language = {en} }
@inproceedings{RiedhammerGillickFavreetal.2008, author = {Riedhammer, Korbinian and Gillick, Dan and Favre, Benoit and Hakkani-T{\"u}r, Dilek}, title = {Packing the Meeting Summarization Knapsack}, series = {INTERSPEECH 2008, 9th Annual Conference of the International Speech Communication Association (ISCA), Brisbane, Australia, September 2008.}, booktitle = {INTERSPEECH 2008, 9th Annual Conference of the International Speech Communication Association (ISCA), Brisbane, Australia, September 2008.}, pages = {2434 -- 2437}, year = {2008}, abstract = {Despite considerable work in automatic meeting summarization over the last few years, comparing results remains difficult due to varied task conditions and evaluations. To address this issue, we present a method for determining the best possible extractive summary given an evaluation metric like ROUGE. Our oracle system is based on a knapsack-packing framework and, though NP-hard, can be solved nearly optimally by a genetic algorithm. To frame new research results in a meaningful context, we suggest presenting our oracle results alongside two simple baselines.
We show oracle and baseline results for a variety of evaluation scenarios that have recently appeared in this field.}, language = {en} }
@inproceedings{RiedhammerFavreHakkaniTuer2008, author = {Riedhammer, Korbinian and Favre, Benoit and Hakkani-T{\"u}r, Dilek}, title = {A Keyphrase Based Approach to Interactive Meeting Summarization}, series = {2008 IEEE Workshop on Spoken Language Technologies (SLT), Goa, India, December 2008.}, booktitle = {2008 IEEE Workshop on Spoken Language Technologies (SLT), Goa, India, December 2008.}, pages = {153 -- 156}, year = {2008}, abstract = {Rooted in multi-document summarization, maximum marginal relevance (MMR) is a widely used algorithm for meeting summarization (MS). A major problem in extractive MS using MMR is finding a proper query: the centroid-based query, which is commonly used in the absence of a manually specified query, cannot significantly outperform a simple baseline system. We introduce a simple yet robust algorithm to automatically extract keyphrases (KP) from a meeting, which can then be used as a query in the MMR algorithm. We show that the KP-based system significantly outperforms both the baseline and centroid-based systems. As human-refined KPs show even better summarization performance, we outline how to integrate the KP approach into a graphical user interface that allows interactive summarization to match the user's needs in terms of summary length and topic focus.}, language = {en} }
@article{RiedhammerFavreHakkaniTuer2010, author = {Riedhammer, Korbinian and Favre, Benoit and Hakkani-T{\"u}r, Dilek}, title = {Long story short - Global unsupervised models for keyphrase based meeting summarization}, series = {Speech Communication}, volume = {52}, journal = {Speech Communication}, number = {10}, pages = {801 -- 815}, year = {2010}, language = {en} }
@inproceedings{RiedhammerBockletOrozcoArroyaveetal.2014, author = {Riedhammer, Korbinian and Bocklet, Tobias and Orozco-Arroyave, Juan Rafael and N{\"o}th, Elmar}, title = {Semi-Automatic Calibration for Dereverberation by Spectral Subtraction for Continuous Speech Recognition}, series = {ITG Symposium on Speech Communication 2014, Erlangen.}, booktitle = {ITG Symposium on Speech Communication 2014, Erlangen.}, publisher = {VDE VERLAG GMBH}, year = {2014}, abstract = {In this article, we describe a semi-automatic calibration algorithm for dereverberation by spectral subtraction. We verify the method by comparing it to a manual calibration derived from measured room impulse responses (RIRs). We conduct extensive experiments to understand the effect of all involved parameters and to verify values suggested in the literature. The experiments are performed on a text read by 31 speakers and recorded by a headset and three far-field microphones. Results are measured in terms of automatic speech recognition (ASR) performance using a 1-gram model to emphasize acoustic recognition performance. To account for the acoustic change introduced by dereverberation, we apply supervised MAP adaptation to the hidden Markov model output probabilities.
The combination of dereverberation and adaptation yields a relative improvement of about 35\% in terms of word error rate (WER) compared to the original signal.}, language = {en} }
@inproceedings{RiedhammerBockletNoeth2011, author = {Riedhammer, Korbinian and Bocklet, Tobias and N{\"o}th, Elmar}, title = {Compensation of Extrinsic Variability in Speaker Verification Systems on Simulated Skype and HF Channel Data}, series = {2011 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Prague, Czech Republic, May 2011.}, booktitle = {2011 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Prague, Czech Republic, May 2011.}, pages = {4840 -- 4843}, year = {2011}, abstract = {In this work we focus on speaker verification on channels of varying quality, namely Skype and high-frequency (HF) radio. In our setup, we assume that telephone recordings of the speakers are available for training, while the test recordings come from different channels with varying (lower) signal quality. Starting from a Gaussian mixture / support vector machine (GMM/SVM) baseline, we evaluate multi-condition training (MCT), an ideal channel classification approach (ICC), and nuisance attribute projection (NAP) to compensate for the loss of information due to the transmission. In an evaluation on Switchboard-2 data using Skype and HF channel simulators, we show that, for good signal quality, NAP improves the baseline system performance from 5\% EER to 3.33\% EER (for both Skype and HF). For strongly distorted data, MCT or, if appropriate, ICC turns out to be the method of choice.}, language = {en} }