@inproceedings{BaumannPaetzelSchlesingeretal.,
  author = {Baumann, Timo and Paetzel, Maike and Schlesinger, Philipp and Menzel, Wolfgang},
  title = {Using Affordances to Shape the Interaction in a Hybrid Spoken Dialogue System},
  series = {Elektronische Sprachsignalverarbeitung 2013 : Tagungsband der 24. Konferenz, Bielefeld, 26. - 28.3.2013 ; [ESSV 2013]},
  booktitle = {Elektronische Sprachsignalverarbeitung 2013 : Tagungsband der 24. Konferenz, Bielefeld, 26. - 28.3.2013 ; [ESSV 2013]},
  publisher = {TUDpress},
  isbn = {978-3-94431-03-4},
  url = {http://nbn-resolving.de/urn:nbn:de:gbv:18-228-7-1836},
  pages = {12--19},
  abstract = {Affordances manifest possibilities of interaction with a spoken dialog system. For example, the act of asking a question affords to the recipient the possibility of answering. In the system we present, the observable act of maneuvering affords the possibility of controlling a motion. Our system thus uses the affordance principle to shape the interaction: to trigger the usage of instructions that are easy to understand and process, the system gives immediate visual feedback to afford user commands that can then be reacted upon. This tightening of the interaction loop requires an incremental processing paradigm to allow fast reactions and to be able to alter ongoing system actions. Our system is a hybrid of incremental and non-incremental processing components, combining conventional, state graph-based processing, which has the advantage of widely available toolkits and well-understood dialog management, with incremental dialog processing which allows for the tight feedback loop that provides for quick reactions. We tested our approach in a small user study and found that users used simpler and setting-independent commands more often and were more efficient when faced with the affordance-based version of our system.},
  language = {en}
}

@misc{BaumannGeiserMenzeletal.,
  author = {Baumann, Timo and Geiser, Dorothee and Menzel, Wolfgang and Mohr, Mario and Neef, Svenja and Nykamp, S{\"o}ren and Rokita, Nils},
  title = {Concurrent Sub-turn Interaction Specification and Dialogue Management with an Application to Interactive Storytelling},
  series = {GSCL Workshop Gesprochene Sprache und Sprachverarbeitung (GSS), Darmstadt, Germany, 2013},
  journal = {GSCL Workshop Gesprochene Sprache und Sprachverarbeitung (GSS), Darmstadt, Germany, 2013},
  address = {Darmstadt, Germany},
  abstract = {Conventional dialogue management centers around an interaction style that is best described as a ping-pong game, with full turns being the units at which speech is delivered (and expected) by the system, which greatly simplifies the interaction management, delivery and understanding components of the system. While the resulting mode of interaction works well for task-based systems, it is insufficient for more conversational interaction styles, where content is delivered and grounded in units finer than full turns (Poesio and Traum 1997) and where turns are delivered concurrently by both interlocutors and hence overlap more frequently. One domain with particularly frequent overlapping contributions is interactive storytelling: a storyteller that is responsive to listeners will integrate their feedback immediately while still speaking a current contribution, and listener's remarks or propositions regarding the story will typically be uttered immediately when the related content is delivered.
We present our work on a dialogue manager that leverages recent advances in incremental speech delivery and reception (Baumann 2013) to provide for an interactive and concurrent storytelling experience. Our system uses an interaction graph with word-by-word granularity that makes it possible to specify, for individual stretches of speech, where (and with what content) users may interrupt or comment, how this is interpreted, and how it is integrated into the storytelling process, thus providing for in-utterance alternatives to be spoken even without audibly interrupting the system's ongoing utterance. The system's interaction graph is specified in an XML language and may be hand-crafted by a story designer but can also be automatically generated. In our current system, speech recognition results are interpreted only when the user utterance is finished; however, we plan to integrate incremental speech recognition and understanding capabilities, and to react immediately to the start of user contributions.},
  language = {en}
}

@misc{PaetzelSchlesingerPricopetal.,
  author = {Paetzel, Maike and Schlesinger, Philipp and Pricop, Mircea and Comaneci, Radu and Baumann, Timo and Menzel, Wolfgang},
  title = {Inkrementelle und zustandsbasierte Verarbeitung in einem hybriden Sprachdialogsystem},
  language = {en}
}