@incollection{RattayGeigerHerderetal.2007, author = {Rattay, Oliver and Geiger, Christian and Herder, Jens and Goebbels, Gernot and Nikitin, Igor}, title = {Zweih{\"a}ndige Interaktion in VR-Umgebungen}, series = {Augmented \& Virtual Reality in der Produktentstehung}, volume = {209}, booktitle = {Augmented \& Virtual Reality in der Produktentstehung}, editor = {Gausemeier, J{\"u}rgen and Grafe, Michael}, publisher = {Heinz Nixdorf Institut, Universit{\"a}t Paderborn}, address = {Paderborn}, isbn = {978-3-939350-28-6}, pages = {315 -- 332}, year = {2007}, abstract = {Einfach benutzbare VR-Anwendungen erfordern andere Interaktionstechniken, als sie konventionelle Desktop-Anwendungen mit Maus, Tastatur und Desktop-Metapher zur Verf{\"u}gung stellen. Da solche Ans{\"a}tze in Konzeption und Realisierung deutlich komplexer sind, m{\"u}ssen diese mit Sorgfalt ausgew{\"a}hlt werden. Folgt man der Argumentation, dass VR eine nat{\"u}rliche Interaktion mit virtuellen Objekten erm{\"o}glicht, so f{\"u}hrt dies fast zwangsl{\"a}ufig zu zweih{\"a}ndigen Interaktionstechniken f{\"u}r virtuelle Umgebungen, da Benutzer in realen Umgebungen gewohnt sind, fast ausschlie{\ss}lich zweih{\"a}ndig zu agieren. In diesem Beitrag geben wir eine {\"U}bersicht {\"u}ber den Stand der Technik im Bereich zweih{\"a}ndiger Interaktion, leiten Anforderungen an eine Entwicklung zweih{\"a}ndiger Interaktionstechniken in VR ab und beschreiben einen eigenen Ansatz. Dabei geht es um die zweih{\"a}ndige Interaktion bei der Simulation flexibler biegeschlaffer Bauteile (z. B. Schlauchverbindungen).}, language = {de} }
@article{Herder1999, author = {Herder, Jens}, title = {Visualization of a Clustering Algorithm of Sound Sources based on Localization Errors}, series = {Journal of the 3D-Forum Society}, volume = {13}, journal = {Journal of the 3D-Forum Society}, number = {3}, pages = {66 -- 70}, year = {1999}, abstract = {A module for soundscape monitoring and visualizing resource management processes was extended for presenting clusters, generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources together into a single representative source, considering localization errors depending on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done at runtime and allows understanding and evaluation of the clustering algorithm.}, language = {en} }
@inproceedings{Herder1999a, author = {Herder, Jens}, title = {Visualization of a Clustering Algorithm of Sound Sources based on Localization Errors}, series = {Second International Conference on Human and Computer}, booktitle = {Second International Conference on Human and Computer}, address = {Aizu-Wakamatsu}, pages = {1 -- 5}, year = {1999}, abstract = {A module for soundscape monitoring and visualizing resource management processes was extended for presenting clusters, generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources together into a single representative source, considering localization errors depending on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done at runtime and allows understanding and evaluation of the clustering algorithm.}, language = {en} }
@inproceedings{MyszkowskiHerderKuniietal.1996, author = {Myszkowski, Karol and Herder, Jens and Kunii, Tosiyasu L. and Ibusuki, Masumi},
title = {Visualization and analysis of occlusion for human jaws using a "functionally generated path"}, series = {IS\&T/SPIE Symp. on Electronic Imaging, Visual Data Exploration and Analysis III}, booktitle = {IS\&T/SPIE Symp. on Electronic Imaging, Visual Data Exploration and Analysis III}, publisher = {The International Society for Optical Engineering}, address = {San Jose}, doi = {10.1117/12.234684}, pages = {360 -- 367}, year = {1996}, abstract = {Dynamic characteristics of occlusion during lower jaw motion are useful in the diagnosis of jaw articulation problems and in computer-aided design/manufacture of teeth restorations. The Functionally Generated Path (FGP), produced as a surface which envelops the actual occlusal surface of the moving opponent jaw, can be used for compact representation of dynamic occlusal relations. In traditional dentistry FGP is recorded as a bite impression in a patient's mouth. We propose an efficient computerized technique for FGP reconstruction and validate it through implementation and testing. The distance maps between occlusal surfaces of jaws, calculated for multiple projection directions and accumulated for mandibular motion, provide information for FGP computation. Rasterizing graphics hardware is used for fast calculation of the distance maps. Real-world data are used: the scanned shape of teeth and the measured motion of the lower jaw. We show applications of FGP to analysis of the occlusion relations and occlusal surface design for restorations.}, language = {en} }
@inproceedings{MyszkowskiOkunevaHerderetal.1997, author = {Myszkowski, Karol and Okuneva, Galina and Herder, Jens and Kunii, Tosiyasu L. and Ibusuki, Masumi}, title = {Visual Simulation of the Chewing Process for Dentistry}, series = {Visualization \& Modeling}, booktitle = {Visualization \& Modeling}, editor = {Earnshaw, Rae A. and Jones, Huw and Vince, John}, publisher = {Academic Press}, address = {London}, isbn = {0-12-227738-4}, pages = {419 -- 438}, year = {1997}, abstract = {CAD/CAM techniques are increasingly used in dentistry for the design and fabrication of teeth restorations. Important concerns are the correction of articulation problems that existed before treatment and the prevention of treatment-generated problems. These require interactive evaluation of the occlusal surfaces of teeth during mastication. Traditional techniques based on the use of casts with mechanical articulators require manual adjustment of occlusal surfaces, which becomes impractical when hard restoration materials like porcelain are used; they are also time and labor consuming and provide little visual information. We present new visual tools and a related user interface for global articulation simulation, developed for the Intelligent Dental Care System project. The aim of the simulation is visual representation of characteristics relevant to the chewing process. The simulation is based on the construction of distance maps, which are visual representations of the distributions of the distances of points in a tooth to the opposite jaw. We use rasterizing graphics hardware for fast calculation of the distance maps. Distance maps are used for collision detection and for the derivation of various characteristics showing the distribution of load on the teeth and the chewing capability of the teeth. Such characteristics can be calculated for particular positions of the jaws; cumulative characteristics are used to describe the properties of jaw movement.
This information may be used for interactive design of the occlusal surfaces of restorations and for jaw articulation diagnosis. We also demonstrate elements of a user interface that exploit metaphors familiar to dentists from everyday practice.}, language = {en} }
@inproceedings{MyszkowskiOkunevaHerderetal.1995, author = {Myszkowski, Karol and Okuneva, Galina and Herder, Jens and Kunii, Tosiyasu L. and Ibusuki, Masumi}, title = {Visual Simulation of the Chewing Process for Dentistry}, series = {Visualization \& Modelling, International Conf., 5-7 December, 1995}, booktitle = {Visualization \& Modelling, International Conf., 5-7 December, 1995}, address = {Leeds}, year = {1995}, abstract = {CAD/CAM techniques are increasingly used in dentistry for the design and fabrication of teeth restorations. Important concerns are the correction of articulation problems that existed before treatment and the prevention of treatment-generated problems. These require interactive evaluation of the occlusal surfaces of teeth during mastication. Traditional techniques based on the use of casts with mechanical articulators require manual adjustment of occlusal surfaces, which becomes impractical when hard restoration materials like porcelain are used; they are also time and labor consuming and provide little visual information. We present new visual tools and a related user interface for global articulation simulation, developed for the Intelligent Dental Care System project. The aim of the simulation is visual representation of characteristics relevant to the chewing process. The simulation is based on the construction of distance maps, which are visual representations of the distributions of the distances of points in a tooth to the opposite jaw. We use rasterizing graphics hardware for fast calculation of the distance maps. Distance maps are used for collision detection and for the derivation of various characteristics showing the distribution of load on the teeth and the chewing capability of the teeth. Such characteristics can be calculated for particular positions of the jaws; cumulative characteristics are used to describe the properties of jaw movement. This information may be used for interactive design of the occlusal surfaces of restorations and for jaw articulation diagnosis. We also demonstrate elements of a user interface that exploit metaphors familiar to dentists from everyday practice.}, language = {en} }
@inproceedings{AytenHerderVonolfen2010, author = {Ayten, H{\"u}seyin and Herder, Jens and Vonolfen, Wolfgang}, title = {Visual Acceptance Evaluation of Soft Shadow Algorithms for Virtual TV Studios}, series = {HC '10 Proceedings of the 13th International Conference on Humans and Computers}, booktitle = {HC '10 Proceedings of the 13th International Conference on Humans and Computers}, publisher = {University of Aizu Press}, address = {Aizu-Wakamatsu}, pages = {66 -- 71}, year = {2010}, abstract = {Shadows in computer graphics are an important rendering aspect for spatial objects. For realtime computer applications such as games, it is essential to represent shadows as accurately as possible. Also, various tv stations work with virtual studio systems instead of real studio sets. Especially for those systems, a realistic impression of the rendered and mixed scene is important. One challenge, hence, is the creation of a natural shadow impression. This paper presents the results of an empirical study to compare the performance and quality of different shadow mapping methods.
For this test, a prototype studio renderer was developed. A percentage closer filter (pcf) with a number of specific resolutions is used to minimize the aliasing issue. More advanced algorithms which generate smooth shadows, like the percentage closer soft shadow (pcss) method as well as the variance shadow maps (vsm) method, are analysed. Different open source apis are used to develop the virtual studio renderer, giving the benefit of permanent enhancement. The Ogre 3D graphic engine is used to implement the rendering system, benefiting from various functions and plugins. The transmission of the tracking data is accomplished with the vrpn server/client and the Intersense api. The different shadow algorithms are compared in a virtual studio environment which also casts real shadows and thus gives a chance for a direct comparison throughout the empirical user study. The performance is measured in frames per second.}, language = {en} }
@inproceedings{KlapdohrWoeldeckeMarinosetal.2010, author = {Klapdohr, Monika and W{\"o}ldecke, Bj{\"o}rn and Marinos, Dionysios and Herder, Jens and Geiger, Christian and Vonolfen, Wolfgang}, title = {Vibrotactile Pitfalls: Arm Guidance for Moderators in Virtual TV Studios}, series = {HC '10 Proceedings of the 13th International Conference on Humans and Computers}, booktitle = {HC '10 Proceedings of the 13th International Conference on Humans and Computers}, publisher = {University of Aizu Press}, address = {Aizu-Wakamatsu}, pages = {72 -- 80}, year = {2010}, abstract = {For this study, an experimental vibrotactile feedback system was developed to help actors with the task of moving their arm to a certain place in a virtual tv studio under live conditions. Our intention is to improve interaction with virtual objects in a virtual set, which are usually not directly visible to the actor, but only on distant displays. Vibrotactile feedback might improve the appearance on tv because an actor is able to look in any desired direction (camera or virtual object) or to read text on a teleprompter while interacting with a virtual object. Visual feedback in a virtual studio lacks spatial relation to the actor, which impedes the adjustment of the desired interaction. The five tactors of the implemented system, which are mounted on the tracked arm, give additional information like collision, navigation and activation. The user study for the developed system shows that the duration for reaching a certain target is much longer in case no visual feedback is given, but the accuracy is similar. In this study, subjects reported that an activation signal indicating the arrival at the target of a drag \& drop task was helpful. In this paper, we discuss the problems we encountered while developing such a vibrotactile display.
Keeping these pitfalls in mind could lead to better feedback systems for actors in virtual studio environments.}, language = {en} }
@incollection{HerderWoerzbergerJuttneretal.2005, author = {Herder, Jens and W{\"o}rzberger, Ralf and Juttner, Carsten and Twelker, Uwe}, title = {Verwendung von Grafikkarten-Prozessoren (GPUs) f{\"u}r eine interaktive Produktvisualisierung in Echtzeit unter Verwendung von Shadern und Videotexturen}, series = {Augmented and Virtual Reality in der Produktentstehung}, volume = {167}, booktitle = {Augmented and Virtual Reality in der Produktentstehung}, editor = {Gausemeier, J{\"u}rgen and Grafe, Michael}, publisher = {Heinz Nixdorf Institut, Universit{\"a}t Paderborn}, address = {Paderborn}, pages = {23 -- 36}, year = {2005}, abstract = {Die Visualisierung von Produkten in Echtzeit ist in vielen Bereichen ein hilfreicher Schritt, um potentiellen Kunden eine Vorstellung vom Einsatzgebiet und einen {\"U}berblick {\"u}ber die finale Anwendung zu erlauben. In den letzten Jahren haben neue Technologien in der Grafikkartenindustrie dazu gef{\"u}hrt, dass fr{\"u}her nur auf teuren Grafikworkstations verf{\"u}gbare M{\"o}glichkeiten nun auch mit relativ kosteng{\"u}nstigen Karten, welche f{\"u}r den Einsatz in Standard-PCs konzipiert wurden, realisierbar sind. Es wird an einem Modellentwurf des Innenraums des People Cargo Movers gezeigt, wie die Beleuchtung innerhalb einer Echtzeitvisualisierung durch Shader realisiert werden kann. Als Lichtquelle wird dabei eine Landschaftsaufnahme herangezogen, welche als eine von mehreren Videotexturen eingebunden wurde. Au{\ss}erdem werden real im virtuellen Studio gefilmte Personen im Innenraum gleicherma{\ss}en {\"u}ber Videotexturen dargestellt und ebenfalls durch die Landschaft beleuchtet.}, language = {de} }
@article{HerderWoerzbergerTwelkeretal.2002, author = {Herder, Jens and W{\"o}rzberger, Ralf and Twelker, Uwe and Albertz, Stefan}, title = {Use of Virtual Environments in the Promotion and Evaluation of Architectural Designs}, series = {Journal of the 3D-Forum Society}, volume = {16}, journal = {Journal of the 3D-Forum Society}, number = {4}, pages = {117 -- 122}, year = {2002}, abstract = {Virtual environments can create a realistic impression of an architectural space during the architectural design process, providing a powerful tool for evaluation and promotion during a project's early stages. In comparison to pre-rendered animations, such as walkthroughs based on CAD models, virtual environments can offer intuitive interaction and a more lifelike experience. Advanced virtual environments allow users to change realtime rendering features with a few manipulations, switching between different versions while still maintaining sensory immersion.
This paper reports on an experimental project in which architectural models are being integrated into interactive virtual environments, and includes demonstrations of both the possibilities and limitations of such applications in evaluating, presenting and promoting architectural designs.}, language = {en} }
@inproceedings{LadwigHerderGeiger2017, author = {Ladwig, Philipp and Herder, Jens and Geiger, Christian}, title = {Towards Precise, Fast and Comfortable Immersive Polygon Mesh Modelling: Capitalising the Results of Past Research and Analysing the Needs of Professionals}, series = {ICAT-EGVE 2017 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments}, booktitle = {ICAT-EGVE 2017 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments}, publisher = {The Eurographics Association}, doi = {10.2312/egve.20171360}, pages = {22 -- 24}, year = {2017}, abstract = {More than three decades of ongoing research in immersive modelling has revealed many advantages of creating objects in virtual environments. Even though there are many benefits, the potential of immersive modelling has only been partly exploited due to unresolved problems such as ergonomic problems, numerous challenges with user interaction and the inability to perform exact, fast and progressive refinements. This paper explores past research, shows alternative approaches and proposes novel interaction tools for pending problems. An immersive modelling application for polygon meshes is created from scratch and tested by professional users of desktop modelling tools, such as Autodesk Maya, in order to assess the efficiency, comfort and speed of the proposed application in direct comparison with professional desktop modelling tools.}, language = {en} }
@inproceedings{BrosdaDaemenDjuderijaetal.2012, author = {Brosda, Constantin and Daemen, Jeff and Djuderija, Sascha and Joeres, Stephan and Langer, Oleg and Schweitzer, Andre and Wilhelm, Andreas and Herder, Jens}, title = {TouchPlanVS Lite - A Tablet-based Tangible Multitouch Planning System for Virtual TV Studio Productions}, series = {Proceedings of the 2012 Joint International Conference on Human-Centered Computer Environments}, booktitle = {Proceedings of the 2012 Joint International Conference on Human-Centered Computer Environments}, publisher = {ACM}, address = {New York}, isbn = {978-1-4503-1191-5}, pages = {64 -- 67}, year = {2012}, abstract = {This paper presents a mobile approach of integrating tangible user feedback in today's virtual TV studio productions. We describe a tangible multitouch planning system, enabling a single user to prepare and customize scene flow and settings. Users can view and interact with virtual objects by using a tangible user interface on a capacitive multitouch surface. TV scenes created in a 2D setting are simultaneously rendered as a separate view using a production/target renderer in 3D. Thereby the user experiences a closer reproduction of a final production and set assets can be reused. Subsequently, a user can arrange scenes on a timeline while maintaining different versions/sequences. The system consists of a tablet and a workstation, which does all application processing and rendering.
The tablet is just an interface connected via wireless LAN.}, language = {en} }
@inproceedings{HerderBrosdaDjuderijaetal.2011, author = {Herder, Jens and Brosda, Constantin and Djuderija, Sascha and Drochtert, Daniel and Genc, {\"O}mer and Joeres, Stephan and Kellerberg, Patrick and Looschen, Simon and Geiger, Christian and W{\"o}ldecke, Bj{\"o}rn}, title = {TouchPlanVS - A Tangible Multitouch Planning System for Virtual TV Studio Productions}, series = {2011 IEEE Symposium on 3D User Interfaces (3DUI)}, booktitle = {2011 IEEE Symposium on 3D User Interfaces (3DUI)}, publisher = {IEEE}, address = {Singapore}, isbn = {978-1-4577-0064-4}, doi = {10.1109/3DUI.2011.5759226}, pages = {103 -- 104}, year = {2011}, abstract = {This article presents a new approach of integrating tangible user feedback in today's virtual TV studio productions. We describe a tangible multitouch planning system, enabling multiple users to prepare and customize scene flow and settings. Users can collaboratively view and interact with virtual objects by using a tangible user interface on a shared multitouch surface. The TV scenes created in a 2D setting are simultaneously rendered on an external monitor, using a production/target renderer in 3D. Thereby the user experiences a closer reproduction of a final production. Subsequently, users are able to join together the scenes into one complex plot. Within the development process, a video prototype of the system shows the user interaction and enables early reviews and evaluations. The requirement analysis is based on expert interviews.}, language = {en} }
@inproceedings{Herder1997, author = {Herder, Jens}, title = {Tools and widgets for spatial sound authoring}, series = {CompuGraphics '97, Sixth International Conference on Computational Graphics and Visualization Techniques: Graphics in the Internet Age, Vilamoura, Portugal}, booktitle = {CompuGraphics '97, Sixth International Conference on Computational Graphics and Visualization Techniques: Graphics in the Internet Age, Vilamoura, Portugal}, address = {Portugal}, isbn = {972-8342-02-0}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-896}, pages = {87 -- 95}, year = {1997}, abstract = {Broader use of virtual reality environments and sophisticated animations spawn a need for spatial sound. Until now, spatial sound design has been based very much on experience and trial and error. Most effects are hand-crafted, because good design tools for spatial sound do not exist. This paper discusses spatial sound authoring and its applications, including shared virtual reality environments based on VRML. New utilities introduced by this research are an inspector for sound sources, an interactive resource manager, and a visual soundscape manipulator. The tools are part of a sound spatialization framework and allow a designer/author of multimedia content to monitor and debug sound events.
Resource constraints like limited sound spatialization channels can also be simulated.}, language = {en} }
@article{Herder1998, author = {Herder, Jens}, title = {Tools and Widgets for Spatial Sound Authoring}, series = {Computer Networks \& ISDN Systems}, volume = {30}, journal = {Computer Networks \& ISDN Systems}, number = {20-21}, publisher = {Elsevier}, pages = {1933 -- 1940}, year = {1998}, language = {en} }
@inproceedings{AmanoMatsushitaYanagawaetal.1996, author = {Amano, Katsumi and Matsushita, Fumio and Yanagawa, Hirofumi and Cohen, Michael and Herder, Jens and Koba, Yoshiharu and Tohyama, Mikio}, title = {The Pioneer sound field control system at the University of Aizu Multimedia Center}, series = {RO-MAN '96 Tsukuba}, booktitle = {RO-MAN '96 Tsukuba}, publisher = {IEEE}, address = {Piscataway}, isbn = {0-7803-3253-9}, doi = {10.1109/ROMAN.1996.568887}, pages = {495 -- 499}, year = {1996}, abstract = {The PSFC, or Pioneer sound field control system, is a DSP-driven hemispherical 14-loudspeaker array, installed at the University of Aizu Multimedia Center. Collocated with a large screen rear-projection stereographic display, the PSFC features realtime control of virtual room characteristics and direction of two separate sound channels, smoothly steering them around a configurable soundscape. The PSFC controls an entire sound field, including sound direction, virtual distance, and simulated environment (reverb level, room size and liveness) for each source. It can also configure a dry (DSP-less) switching matrix for direct directionalization. The PSFC speaker dome is about 14 m in diameter, allowing about twenty users at once to comfortably stand or sit near its sweet spot.}, language = {en} }
@article{HerderCohen2002, author = {Herder, Jens and Cohen, Michael}, title = {The Helical Keyboard: Perspectives for Spatial Auditory Displays and Visual Music}, series = {Journal of New Music Research}, volume = {31}, journal = {Journal of New Music Research}, number = {3}, pages = {269 -- 281}, year = {2002}, abstract = {Auditory displays with the ability to dynamically spatialize virtual sound sources under real-time conditions enable advanced applications for art and music. A listener can be deeply immersed while interacting and participating in the experience. We review some of those applications while focusing on the Helical Keyboard project and discussing the required technology. Inspired by the cyclical nature of octaves and the helical structure of a scale, a model of a piano-style keyboard was prepared, which was then geometrically warped into a helicoidal configuration, one octave/revolution, pitch mapped to height and chroma. It can be driven by MIDI events, real-time or sequenced, which stream is both synthesized and spatialized by a spatial sound display. The sound of the respective notes is spatialized with respect to sinks, avatars of the human user, by default in the tube of the helix. Alternative coloring schemes can be applied, including a color map compatible with chromastereoptic eyewear. The graphical display animates polygons, interpolating between the notes of a chord across the tube of the helix. Recognition of simple chords allows directionalization of all the notes of a major triad from the position of its musical root. The system is designed to allow, for instance, separate audition of harmony and melody, commonly played by the left and right hands, respectively, on a normal keyboard.
Perhaps the most exotic feature of the interface is the ability to fork one's presence, replicating subject instead of object by installing multiple sinks at arbitrary places around a virtual scene so that, for example, harmony and melody can be separately spatialized, using two heads to normalize the octave; such a technique effectively doubles the helix from the perspective of a single listener. Rather than a symmetric arrangement of the individual helices, they are perceptually superimposed in-phase, co-extensively, so that corresponding notes in different registers are at the same azimuth.}, language = {en} }
@incollection{HerderJaenschHorstetal.2004, author = {Herder, Jens and Jaensch, Kai and Horst, Bruno and Novotny, Thomas}, title = {Testm{\"a}rkte in einer Virtuellen Umgebung - Die Bestimmung von Preisabsatzfunktionen zur Unterst{\"u}tzung des Innovationsmanagements}, series = {Augmented and Virtual Reality in der Produktentstehung}, volume = {149}, booktitle = {Augmented and Virtual Reality in der Produktentstehung}, editor = {Gausemeier, J{\"u}rgen and Grafe, Michael}, publisher = {Heinz Nixdorf Institut, Universit{\"a}t Paderborn}, address = {Paderborn}, isbn = {3-935433-58-1}, pages = {97 -- 110}, year = {2004}, abstract = {Multimediale Technologien werden in der Marktforschung immer st{\"a}rker eingesetzt, um flexible und kosteng{\"u}nstige Studien durchzuf{\"u}hren. Im Innovationsprozess kann dabei auf die langj{\"a}hrigen Erfahrungen zur{\"u}ckgegriffen werden, die durch den Einsatz der Computersimulation in der technischen Produktentwicklung zustande gekommen sind. In sehr fr{\"u}hen Phasen des Innovationsprozesses k{\"o}nnen durch Einsatz der neuen Technologien die Markteinf{\"u}hrungskonzepte f{\"u}r neue Produkte getestet werden. Die Applikationen der virtuellen Realit{\"a}t bieten ein einzigartiges Potential, neue Produkte einschlie{\ss}lich des Marketingkonzeptes zu testen, ohne dass dieses Produkt bereits physisch vorhanden sein muss. Am Beispiel eines Elementes des Marketingkonzeptes, der Preispolitik, zeigt die vorliegende Studie auf, welches Potential die virtuelle Kaufsituation von Produkten bietet. Der Fokus des Projektes liegt auf der interaktiven Produktpr{\"a}sentation in einer virtuellen Umgebung, die in eine Online-Befragung mit zus{\"a}tzlichen Werbefilmen eingebettet ist. Visuell hochwertige 3D-Produktpr{\"a}sentationen versetzen den Probanden in eine virtuelle Einkaufsumgebung, die einem realen Szenario entspricht. Die virtuellen Produkte werden in mehreren Kaufentscheidungsrunden zu unterschiedlichen Preisen angeboten. Der Preisuntersuchung geht eine Pr{\"a}sentation ausgew{\"a}hlter Werbespots sowie eine produktbezogene Befragung voraus. Im Anschluss an die virtuellen Preisentscheidungen werden die Eindr{\"u}cke sowie einige Kontrollgr{\"o}{\ss}en abgefragt. In weitergehenden Studien dieser Art k{\"o}nnen die Wirkungen mehrerer Marketing-Instrumente zu einem Zeitpunkt untersucht werden, in dem sich die Produkte noch im Entwicklungsprozess befinden. Auf diesem Weg lassen sich auch Wettbewerbsvorteile bestehender Produkte effizienter erkennen und nutzen. Mit den hoch entwickelten Computer- und Visualisierungstechnologien ist ein m{\"a}chtiges Werkzeug entstanden, das bereits f{\"u}r kommerzielle Pr{\"a}sentationen und Produktstudien eingesetzt wird.
Zuk{\"u}nftig kann es auch in Kombination mit Internetanwendungen und klassischen Methoden der Marktforschung zu einem sehr fr{\"u}hen Zeitpunkt umfassende Erkenntnisse {\"u}ber ein Produkt liefern.}, language = {en} } @incollection{CohenHerder1998, author = {Cohen, Michael and Herder, Jens}, title = {Symbolic representations of exclude and include for audio sources and sinks: Figurative suggestions of mute/solo \& cue and deafen/confide \& harken}, series = {Virtual Environments '98, Proceedings of the Eurographics Workshop}, booktitle = {Virtual Environments '98, Proceedings of the Eurographics Workshop}, editor = {G{\"o}bel, Martin and Landauer, J{\"u}rgen and Lang, Ulrich and Wapler, Matthias}, publisher = {Springer-Verlag}, address = {Stuttgart}, isbn = {3-211-83233-5}, doi = {10.1007/978-3-7091-7519-4_23}, pages = {235 -- 242}, year = {1998}, language = {en} } @inproceedings{HerderBuentigDaemenetal.2014, author = {Herder, Jens and B{\"u}ntig, Fabian and Daemen, Jeff and Lang, Jaroslaw and L{\"u}ck, Florian and S{\"a}ger, Mitja and S{\"o}rensen, Roluf and Hermanni, Markus and Vonolfen, Wolfgang}, title = {Subtle Animations using Talent Tracking in a Virtual (TV) Studio}, series = {17th International Conference on Human and Computer}, booktitle = {17th International Conference on Human and Computer}, address = {Hamamatsu/Aizu-Wakamatsu/Duesseldorf}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-16009}, pages = {6}, year = {2014}, abstract = {Markerless talent tracking is widely used for interactions and animations within virtual environments. In a virtual (tv) studio talents could be overburden by interaction tasks because camera and text require extensive attention. We take a look into animations and inter- actions within a studio, which do not require any special attention or learning. We show the generation of an artificial shadow from a talent, which ease the keying process, where separation of real shadows from the background is a difficult task. We also demonstrate animations of footsteps and dust. Furthermore, capturing talents' height can also be used to adjust the parameters of elements in the virtual environment, like the position and scaling of a virtual display. In addition to the talents, a rigid body was tracked as placeholder for graphics, easing the interaction tasks for a talent. Two test productions show the possibilities, which subtle animations offer. In the second production, the rendering was improved (shadows, filtering, normal maps, ...) and instead of using the rigid body to move an object (a flag), the animation was only controlled by the hand's position.}, language = {en} } @inproceedings{WoeldeckeVierjahnFlaskoetal.2009, author = {W{\"o}ldecke, Bj{\"o}rn and Vierjahn, Tom and Flasko, Matthias and Herder, Jens and Geiger, Christian}, title = {Steering actors through a virtual set employing vibro-tactile feedback}, series = {TEI '09 Proceedings of the 3rd International Conference on Tangible and Embedded Interaction}, booktitle = {TEI '09 Proceedings of the 3rd International Conference on Tangible and Embedded Interaction}, publisher = {ACM}, address = {New York}, isbn = {978-1-60558-493-5}, doi = {10.1145/1517664.1517703}, pages = {169 -- 174}, year = {2009}, abstract = {Actors in virtual studio productions are faced with the challenge that they have to interact with invisible virtual objects because these elements are rendered separately and combined with the real image later in the production process. 
Virtual sets typically use static virtual elements or animated objects with predefined behavior so that actors can practice their performance and errors can be corrected in the post production. With the demand for inexpensive live recording and interactive TV productions, virtual objects will be dynamically rendered at arbitrary positions that cannot be predicted by the actor. Perceptive aids have to be employed to support a natural interaction with these objects. In our work we study the effect of haptic feedback for a simple form of interaction. Actors are equipped with a custom built haptic belt and get vibrotactile feedback during a small navigational task (path following). We present a prototype of a wireless vibrotactile feedback device and a small framework for evaluating haptic feedback in a virtual set environment. Results from an initial pilot study indicate that vibrotactile feedback is a suitable non-visual aid for interaction that is at least comparable to audio-visual alternatives used in virtual set productions.}, language = {en} }
@inproceedings{RyskeldievIgarashiZhangetal.2018, author = {Ryskeldiev, Bektur and Igarashi, Toshiharu and Zhang, Junjian and Ochiai, Yoichi and Cohen, Michael and Herder, Jens}, title = {Spotility: Crowdsourced Telepresence for Social and Collaborative Experiences in Mobile Mixed Reality}, series = {ACM Conference on Computer Supported Cooperative Work and Social Computing (CSCW '18)}, booktitle = {ACM Conference on Computer Supported Cooperative Work and Social Computing (CSCW '18)}, publisher = {ACM}, address = {New York}, isbn = {978-1-4503-6018-0}, doi = {10.1145/3272973.3274100}, pages = {373 -- 376}, year = {2018}, abstract = {Live video streaming is becoming increasingly popular as a form of interaction in social applications. One of its main advantages is the ability to immediately create and connect a community of remote users on the spot. In this paper we discuss how this feature can be used for crowdsourced completion of simple visual search tasks (such as finding specific objects in libraries and stores, or navigating around live events) and social interactions through mobile mixed reality telepresence interfaces. We present a prototype application that allows users to create a mixed reality space with photospherical imagery as a background and interact with other connected users through viewpoint, audio, and video sharing, as well as realtime annotations in mixed reality space. Believing in the novelty of our system, we conducted a short series of interviews with industry professionals on the possible applications of our system. We discuss proposed use-cases for user evaluation, as well as outline future extensions of our system.}, language = {en} }
@inproceedings{SimschHerder2014, author = {Simsch, Jonathan and Herder, Jens}, title = {SpiderFeedback - Visual Feedback for Orientation in Virtual TV Studios}, series = {ACE'14, 11th Advances in Computer Entertainment Technology Conference, ACM, Funchal, Portugal}, booktitle = {ACE'14, 11th Advances in Computer Entertainment Technology Conference, ACM, Funchal, Portugal}, editor = {Chisik, Yoram}, publisher = {ACM}, address = {New York}, isbn = {978-1-4503-2945-3}, doi = {10.1145/2663806.2663830}, pages = {8}, year = {2014}, abstract = {A visual and spatial feedback system for orientation in virtual sets of virtual TV studios was developed and evaluated. It is based on a green proxy object, which moves around in the acting space by way of four transparent wires.
A separate unit controls four winches and is connected to an engine, which renders the virtual set. A newly developed plugin registers a virtual object's position with the proxy object, which imitates the virtual object's movement on stage. This will allow actors to establish important eye contact with a virtual object and feel more comfortable in a virtual set. Furthermore, interaction with the virtual object and its proxy can be realised through a markerless actor tracking system. Several possible scenarios for user application were recorded and presented to experts in the broadcast industry, who evaluated the potential of SpiderFeedback in interviews and by questionnaires.}, language = {en} }
@incollection{HerderNovotny2003, author = {Herder, Jens and Novotny, Thomas}, title = {Spatial Sound Design and Interaction for Virtual Environments in the Promotion of Architectural Designs}, series = {Third International Workshop on Spatial Media}, booktitle = {Third International Workshop on Spatial Media}, address = {Aizu-Wakamatsu}, pages = {7 -- 11}, year = {2003}, abstract = {Virtual environment walkthrough applications are generally enhanced by a user's interactions within a simulated architectural space, but the enhancement that stems from changes in spatial sound that are coupled with a user's behavior is particularly important, especially with regard to creating a sense of place. When accompanied by stereoscopic image synthesis, spatial sound can immerse the user in a high-realism virtual copy of the real world. An advanced virtual environment that allows users to change realtime rendering features with a few manipulations has been shown to enable switching between different versions of a modeled space while maintaining sensory immersion. This paper reports on an experimental project in which an architectural model is being integrated into such an interactive virtual environment. The focus is on the spatial sound design for supporting interaction, including demonstrations of both the possibilities and limitations of such applications in presenting and promoting architectural designs, as well as in three-dimensional sketching.}, language = {en} }
@inproceedings{GarbeHerbstHerder2007, author = {Garbe, Katharina and Herbst, Iris and Herder, Jens}, title = {Spatial Audio for Augmented Reality}, series = {10th International Conference on Human and Computer}, booktitle = {10th International Conference on Human and Computer}, address = {D{\"u}sseldorf, Aizu-Wakamatsu}, pages = {53 -- 58}, year = {2007}, abstract = {Using spatial audio successfully for augmented reality (AR) applications is a challenge, but is awarded with an improved user experience. Thus, we have extended the AR/VR framework {\sc Morgan} with spatial audio to improve users' orientation in an AR application. In this paper, we investigate the users' capability to localize and memorize spatial sounds (registered with virtual or real objects). We discuss two scenarios. In the first scenario, the user localizes only sound sources and in the second scenario the user memorizes the location of audio-visual objects. Our results reflect spatial audio performance within the application domain and show which technology pitfalls still exist. Finally, we provide design recommendations for spatial audio AR environments.}, language = {en} }
@inproceedings{HerderCohen1997, author = {Herder, Jens and Cohen, Michael}, title = {Sound Spatialization Resource Management in Virtual Reality Environments}, series = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education},
booktitle = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education}, address = {Tokyo}, pages = {407 -- 414}, year = {1997}, abstract = {In a virtual reality environment users are immersed in a scene with objects which might produce sound. The responsibility of a VR environment is to present these objects, but a system has only limited resources, including spatialization channels (mixels), MIDI/audio channels, and processing power. The sound spatialization resource manager controls sound resources and optimizes fidelity (presence) under given conditions. For that, a priority scheme based on human psychophysical hearing is needed. Parameters for spatialization priorities include intensity calculated from volume and distance, orientation in the case of non-uniform radiation patterns, occluding objects, frequency spectrum (low frequencies are harder to localize), expected activity, and others. Objects which are spatially close together (depending on distance and direction) can be mixed. Sources that cannot be spatialized can be treated as a single ambient sound source. Important for resource management is the resource assignment, i.e., minimizing swap operations, which makes it desirable to look ahead and predict upcoming events in a scene. Prediction is achieved by monitoring objects' speed and past evaluation values. Fidelity is contrasted for different kinds of resource restrictions and optimal resource assignment based upon unlimited dynamic scene look-ahead. To give standard and comparable results, the VRML 2.0 specification is used as an application programmer interface. Applicability is demonstrated with a helical keyboard, a polyphonic MIDI stream driven animation including user interaction (user moves around, playing together with programmed notes). The developed sound spatialization resource manager gives improved spatialization fidelity under runtime constraints. Application programmers and virtual reality scene designers are freed from the burden of assigning and predicting the sound sources.}, language = {en} }
@inproceedings{Herder1998a, author = {Herder, Jens}, title = {Sound Spatialization Framework: An Audio Toolkit for Virtual Environments}, series = {First International Conference on Human and Computer, Aizu-Wakamatsu, September 1998}, booktitle = {First International Conference on Human and Computer, Aizu-Wakamatsu, September 1998}, address = {Aizu}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-788}, pages = {6}, year = {1998}, abstract = {The Sound Spatialization Framework is a C++ toolkit and development environment for providing advanced sound spatialization for virtual reality and multimedia applications. The Sound Spatialization Framework provides many powerful display and user-interface features not found in other sound spatialization software packages.
It provides facilities that go beyond simple sound source spatialization: visualization and editing of the soundscape, multiple sinks, clustering of sound sources, monitoring and controlling resource management, support for various spatialization backends, and classes for MIDI animation and handling.}, language = {en} }
@article{Herder1998b, author = {Herder, Jens}, title = {Sound Spatialization Framework: An Audio Toolkit for Virtual Environments}, series = {Journal of the 3D-Forum Society}, volume = {12}, journal = {Journal of the 3D-Forum Society}, number = {3}, pages = {17 -- 22}, year = {1998}, abstract = {The Sound Spatialization Framework is a C++ toolkit and development environment for providing advanced sound spatialization for virtual reality and multimedia applications. The Sound Spatialization Framework provides many powerful display and user-interface features not found in other sound spatialization software packages. It provides facilities that go beyond simple sound source spatialization: visualization and editing of the soundscape, multiple sinks, clustering of sound sources, monitoring and controlling resource management, support for various spatialization backends, and classes for MIDI animation and handling.}, keywords = {sound spatialization, resource management, virtual environments, spatial sound authoring, user interface design, human-machine interfaces}, language = {en} }
@article{StruchholzHerderLeckschat2006, author = {Struchholz, Holger and Herder, Jens and Leckschat, Dieter}, title = {Sound radiation simulation of musical instruments based on interpolation and filtering of multi-channel recordings}, series = {Journal of the 3D-Forum Society}, volume = {20}, journal = {Journal of the 3D-Forum Society}, number = {1}, pages = {41 -- 47}, year = {2006}, abstract = {With the virtual environment developed here, the characteristic sound radiation patterns of musical instruments can be experienced in real-time. The user may freely move around a musical instrument, thereby receiving acoustic and visual feedback in real-time. The perception of auditory and visual effects is intensified by the combination of acoustic and visual elements, as well as the option of user interaction. The simulation of characteristic sound radiation patterns is based on interpolating the intensities of a multichannel recording and offers a near-natural mapping of the sound radiation patterns. Additionally, a simple filter has been developed, enabling the qualitative simulation of an instrument's characteristic sound radiation patterns to be easily implemented within real-time 3D applications. Both methods of simulating sound radiation patterns have been evaluated for a saxophone with respect to their functionality and validity by means of spectral analysis and an auditory experiment.}, language = {en} }
@inproceedings{HerderWilkeHeimbachetal.2009, author = {Herder, Jens and Wilke, Michael and Heimbach, Julia and G{\"o}bel, Sebastian and Marinos, Dionysios}, title = {Simple Actor Tracking for Virtual TV Studios Using a Photonic Mixing Device}, series = {12th International Conference on Human and Computer}, booktitle = {12th International Conference on Human and Computer}, address = {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf}, year = {2009}, abstract = {Virtual TV studios use actor tracking systems for resolving the occlusion of computer graphics and studio camera image. The actor tracking delivers the distance between actor and studio camera.
We deploy a photonic mixing device, which captures a depth map and a luminance image at low resolution. The renderer engine gets one depth value per actor via the OSC protocol. We describe the actor recognition algorithm based on the luminance image and the depth value calculation. We discuss technical issues like noise and calibration.}, language = {en} }
@inproceedings{DaemenHerderKochetal.2016, author = {Daemen, Jeff and Herder, Jens and Koch, Cornelius and Ladwig, Philipp and Wiche, Roman and Wilgen, Kai}, title = {Semi-Automatic Camera and Switcher Control for Live Broadcast}, series = {TVX '16 Proceedings of the ACM International Conference on Interactive Experiences for TV and Online Video, Chicago, Illinois, USA — June 22 - 24, 2016}, booktitle = {TVX '16 Proceedings of the ACM International Conference on Interactive Experiences for TV and Online Video, Chicago, Illinois, USA — June 22 - 24, 2016}, publisher = {ACM}, address = {New York}, isbn = {978-1-4503-4067-0}, doi = {10.1145/2932206.2933559}, pages = {129 -- 134}, year = {2016}, abstract = {Live video broadcasting requires a multitude of professional expertise to enable multi-camera productions. Robotic systems allow the automation of common and repeated tracking shots. However, predefined camera shots do not allow quick adjustments when required due to unpredictable events. We introduce a modular automated robotic camera control and video switch system, based on fundamental cinematographic rules. The actors' positions are provided by a markerless tracking system. In addition, sound levels of actors' lavalier microphones are used to analyse the current scene. An expert system determines appropriate camera angles and decides when to switch from one camera to another. A test production was conducted to observe the developed prototype in a live broadcast scenario and served as a video-demonstration for an evaluation.}, language = {en} }
@incollection{DavinHerder2021, author = {Davin, Till and Herder, Jens}, title = {Real-Time Relighting of Video Streams for Augmented Virtuality Scenes}, series = {GI VR / AR Workshop. Gesellschaft f{\"u}r Informatik e.V.}, booktitle = {GI VR / AR Workshop. Gesellschaft f{\"u}r Informatik e.V.}, editor = {Weier, Martin and Bues, Matthias and Wechner, Reto}, publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)}, address = {Bonn}, doi = {10.18420/vrar2021_6}, pages = {16}, year = {2021}, language = {en} }
@inproceedings{WoeldeckeMarinosPogschebaetal.2011, author = {W{\"o}ldecke, Bj{\"o}rn and Marinos, Dionysios and Pogscheba, Patrick and Geiger, Christian and Herder, Jens and Schwirten, Tobias}, title = {radarTHEREMIN - Creating Musical Expressions in a Virtual Studio Environment}, series = {2011 IEEE International Symposium on VR Innovation}, booktitle = {2011 IEEE International Symposium on VR Innovation}, publisher = {IEEE}, address = {Singapore}, isbn = {978-1-4577-0055-2}, doi = {10.1109/ISVRI.2011.5759671}, pages = {345 -- 346}, year = {2011}, abstract = {In this paper we describe a prototypical system for live musical performance in a virtual studio environment. The performer stands in front of the studio camera and interacts with an infrared-laser-based multi-touch device. The final TV image shows the performer interacting with a virtual screen which is augmented in front of herself.
To overcome the problem of the performer not seeing this virtual screen in reality, we use a special hexagonal grid to facilitate the performer's awareness of this novel Theremin-like virtual musical instrument.}, language = {en} }
@inproceedings{MartensHerder1999, author = {Martens, William L. and Herder, Jens}, title = {Perceptual criteria for eliminating reflectors and occluders from the rendering of environmental sound}, series = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, booktitle = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, publisher = {Acoustical Society of America, European Acoustics Association}, address = {Berlin}, year = {1999}, abstract = {Given limited computational resources available for the rendering of spatial sound imagery, we seek to determine effective means for choosing what components of the rendering will provide the most audible differences in the results. Rather than begin with an analytic approach that attempts to predict audible differences on the basis of objective parameters, we chose to begin with subjective tests of how audibly different the rendering result may be heard to be when that result includes two types of sound obstruction: reflectors and occluders. Single-channel recordings of 90 short speech sounds were made in an anechoic chamber in the presence and absence of these two types of obstructions, and as the angle of those obstructions varied over a 90 degree range. These recordings were reproduced over a single loudspeaker in that anechoic chamber, and listeners were asked to rate how confident they were that the recording of each of these 90 stimuli included an obstruction. These confidence ratings can be used as an integral component in the evaluation function used to determine which reflectors and occluders are most important for rendering.}, language = {en} }
@inproceedings{CohenHerderMartens2001, author = {Cohen, Michael and Herder, Jens and Martens, William}, title = {Panel: Eartop computing and cyberspatial audio technology}, series = {IEEE-VR2001: IEEE Virtual Reality}, booktitle = {IEEE-VR2001: IEEE Virtual Reality}, publisher = {IEEE}, address = {Yokohama}, isbn = {0-7695-0948-7}, pages = {322 -- 323}, year = {2001}, language = {en} }
@inproceedings{Herder1999b, author = {Herder, Jens}, title = {Optimization of Sound Spatialization Resource Management through Clustering}, series = {Second International Conference on Human and Computer}, booktitle = {Second International Conference on Human and Computer}, address = {Aizu-Wakamatsu}, pages = {1 -- 7}, year = {1999}, abstract = {Level-of-detail is a concept well-known in computer graphics to reduce the number of rendered polygons. Depending on the distance to the subject (viewer), the objects' representation is changed. A similar concept is the clustering of sound sources for sound spatialization. Clusters can be used to hierarchically organize mixels and to optimize the use of resources, by grouping multiple sources together into a single representative source. Such a clustering process should minimize the error of position allocation of elements, perceived as angle and distance, and also differences between velocity relative to the sink (i.e., Doppler shift).
Objects with similar direction of motion and speed (relative to sink) in the same acoustic resolution cone and with similar distance to a sink can be grouped together.}, language = {en} }
@article{JensHerder1999, author = {Herder, Jens}, title = {Optimization of Sound Spatialization Resource Management through Clustering}, series = {Journal of the 3D-Forum Society}, volume = {13}, journal = {Journal of the 3D-Forum Society}, number = {3}, pages = {59 -- 65}, year = {1999}, abstract = {Level-of-detail is a concept well-known in computer graphics to reduce the number of rendered polygons. Depending on the distance to the subject (viewer), the objects' representation is changed. A similar concept is the clustering of sound sources for sound spatialization. Clusters can be used to hierarchically organize mixels and to optimize the use of resources, by grouping multiple sources together into a single representative source. Such a clustering process should minimize the error of position allocation of elements, perceived as angle and distance, and also differences between velocity relative to the sink (i.e., Doppler shift). Objects with similar direction of motion and speed (relative to sink) in the same acoustic resolution cone and with similar distance to a sink can be grouped together.}, language = {en} }
@inproceedings{FiedlerRillingBogenetal.2015, author = {Fiedler, Jannik and Rilling, Stefan and Bogen, Manfred and Herder, Jens}, title = {Multimodal interaction techniques in scientific data visualization: An analytical survey}, series = {Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP-2015)}, booktitle = {Proceedings of the 10th International Conference on Computer Graphics Theory and Applications (GRAPP-2015)}, editor = {Braz, Jos{\'e}}, publisher = {SCITEPRESS}, address = {s. l.}, isbn = {978-989-758-087-1}, doi = {10.5220/0005296404310437}, pages = {431 -- 437}, year = {2015}, abstract = {The interpretation process of complex data sets makes the integration of effective interaction techniques crucial. Recent work in the field of human-computer interaction has shown that there is strong evidence that multimodal user interaction, i.e., the integration of various input modalities and interaction techniques into one comprehensive user interface, can improve human performance when interacting with complex data sets. However, it is still unclear which factors make these user interfaces superior to unimodal user interfaces. The contribution of this work is an analytical comparison of a multimodal and a unimodal user interface for a scientific visualization application.
We show that multimodal user interaction with simultaneously integrated speech and gesture input improves user performance regarding efficiency and ease of use.}, language = {en} }
@inproceedings{HerderLadwigVermeegenetal.2018, author = {Herder, Jens and Ladwig, Philipp and Vermeegen, Kai and Hergert, Dennis and Busch, Florian and Klever, Kevin and Holthausen, Sebastian and Ryskeldiev, Bektur}, title = {Mixed Reality Experience - How to Use a Virtual (TV) Studio for Demonstration of Virtual Reality Applications}, series = {GRAPP 2018 - 13th International Conference on Computer Graphics Theory and Applications}, booktitle = {GRAPP 2018 - 13th International Conference on Computer Graphics Theory and Applications}, publisher = {INSTICC}, address = {Setubal - Portugal}, isbn = {978-989-758-287-5}, doi = {10.5220/0006637502810287}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-15823}, pages = {281 -- 287}, year = {2018}, abstract = {The article discusses the question of "How to convey the experience in a virtual environment to third parties?" and explains the different technical implementations which can be used for live streaming and recording of a mixed reality experience. The real-world applications of our approach include education, entertainment, e-sports, tutorials, and cinematic trailers, which can benefit from our research by finding a suitable solution for their needs. We explain and outline our Mixed Reality systems as well as discuss the experience of recorded demonstrations of different VR applications, including the need for calibrated camera lens parameters based on realtime encoder values.}, language = {en} }
@inproceedings{HerderTakedaVermeegenetal.2019, author = {Herder, Jens and Takeda, Shinpei and Vermeegen, Kai and Davin, Till and Berners, Dominique and Ryskeldiev, Bektur and Zimmer, Christian and Druzetic, Ivana and Geiger, Christian}, title = {Mixed Reality Art Experiments - Immersive Access to Collective Memories}, series = {ISEA2019, Proceedings, 25th International Symposium on Electronic Art, Gwangju, South Korea, June 22-28, 2019}, booktitle = {ISEA2019, Proceedings, 25th International Symposium on Electronic Art, Gwangju, South Korea, June 22-28, 2019}, publisher = {ISEA}, address = {Gwangju}, pages = {334 -- 341}, year = {2019}, abstract = {We report on several experiments on applying mixed reality technology in the context of accessing collective memories of the atomic bombs, the Holocaust, and the Second World War. We discuss the impact of Virtual Reality, Augmented Virtuality and Augmented Reality for specific memorial locations. We show how to use a virtual studio for demonstrating an augmented reality application for a specific location in a remote session within a video conference. Augmented Virtuality is used to recreate the local environment, thus providing a context and helping the participants recollect emotions related to a certain place.
This technique demonstrates the advantages of using virtual (VR) and augmented (AR) reality environments for rapid prototyping and pitching project ideas in a live remote setting.}, language = {en} } @incollection{Herder2006, author = {Herder, Jens}, title = {Matching Light for Virtual Studio TV Productions}, series = {9th International Conference on Human and Computer}, booktitle = {9th International Conference on Human and Computer}, address = {Aizu-Wakamatsu}, pages = {158 -- 162}, year = {2006}, abstract = {High dynamic range (HDR) environment maps based on still images or video streams are used for computer animation or interactive systems. The task of realistic light setup of scenes using captured environment maps might be eased, while the visual quality improves. In this article, we discuss the light setting problem for virtual studio (tv) productions, where layout and system become more complex in order to handle this new feature of studio light capturing. The analysis of system requirements identifies the technical challenges.}, language = {en} } @inproceedings{DaemenHaufsBrusbergHerder2013, author = {Daemen, Jeff and Haufs-Brusberg, Peter and Herder, Jens}, title = {Markerless Actor Tracking for Virtual (TV) Studio Applications}, series = {2013 International Joint Conference on Awareness Science and Technology \& Ubi-Media Computing (iCAST 2013 \& UMEDIA 2013)}, booktitle = {2013 International Joint Conference on Awareness Science and Technology \& Ubi-Media Computing (iCAST 2013 \& UMEDIA 2013)}, publisher = {IEEE}, address = {Aizu-Wakamatsu}, isbn = {978-1-4799-2364-9}, doi = {10.1109/ICAwST.2013.6765544}, pages = {790 -- 795}, year = {2013}, abstract = {Virtual (tv) studios gain much more acceptance through improvements in computer graphics and camera tracking. Still, commercial studios cannot have full interaction between actors and the virtual scene, because actor data is not completely available in digital form and the feedback for actors is still not sufficient. Markerless full body tracking might revolutionize virtual studio technology, as it allows better interaction between the real and virtual world. This article reports on using markerless actor tracking in a virtual studio with a tracking volume of nearly 40 cubic meters, enabling up to three actors within the green box. The tracking is used for resolving the occlusion between virtual objects and actors, so that the renderer can automatically output a mask for virtual objects in the foreground in case the actor is behind them. It is also used for triggering functions scripted within the renderer engine, which are attached to virtual objects, starting any kind of action (e.g., animation). Last but not least, the system is used for controlling avatars within the virtual set. All tracking and rendering is done within a studio frame rate of 50 Hz with about 3 frames delay. The markerless actor tracking within virtual studios is evaluated by experts using an interview approach. The statistical evaluation is based on a questionnaire.}, language = {en} } @article{JuttnerHerder2006, author = {Juttner, Carsten and Herder, Jens}, title = {Lighting an Interactive Scene in Real-time with a GPU and Video Textures}, series = {Journal of the 3D-Forum Society}, volume = {20}, journal = {Journal of the 3D-Forum Society}, number = {1}, pages = {22 -- 28}, year = {2006}, abstract = {The presentation of virtual environments in real time has always been a demanding task.
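
The occlusion handling described in the DaemenHaufsBrusbergHerder2013 entry reduces, per pixel, to comparing the tracked actor's depth with the depth of each virtual object. A toy sketch of that masking step (the array interface is invented):

    import numpy as np

    def foreground_mask(object_depth, actor_depth):
        """Both inputs: HxW camera-space depth in meters, np.inf
        where nothing is present. True marks pixels where the
        virtual object lies in front of the actor and is drawn."""
        return object_depth < actor_depth

    obj = np.array([[2.0, 2.0], [np.inf, 2.0]])
    actor = np.array([[3.0, 1.5], [1.5, np.inf]])
    print(foreground_mask(obj, actor))  # [[True False] [False True]]
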
Specially designed graphics hardware is necessary to deal with the large amounts of data these applications typically produce. For several years the chipsets that were used allowed only simple lighting models and fixed algorithms. But recent development has produced new graphics processing units (GPUs) that are much faster and more programmable than their predecessors. This paper presents an approach to take advantage of these new features. It uses a video texture as part of the lighting calculations for the passenger compartment of a virtual train and was run on the GPU of a recent PC graphics card. The task was to map the varying illumination of a filmed landscape onto the virtual objects and also onto another video texture (showing two passengers), thereby enhancing the realism of the scene.}, language = {en} } @inproceedings{MarinosGeigerHerder2012, author = {Marinos, Dionysios and Geiger, Christian and Herder, Jens}, title = {Large-Area Moderator Tracking and Demonstrational Configuration of Position Based Interactions for Virtual Studio}, series = {EuroITV '12 Proceedings of the 10th European Conference on Interactive TV and Video}, booktitle = {EuroITV '12 Proceedings of the 10th European Conference on Interactive TV and Video}, publisher = {ACM}, address = {New York}, isbn = {978-1-4503-1107-6}, doi = {10.1145/2325616.2325639}, pages = {105 -- 114}, year = {2012}, abstract = {In this paper we introduce a system for tracking persons walking or standing on a large planar surface and for using the acquired data to easily configure position based interactions for virtual studio productions. The tracking component of the system, radarTRACK, is based on a laser scanner device capable of delivering interaction points on a large configurable plane. By using the device on the floor it is possible to use the delivered data to detect feet positions and derive the position and orientation of one or more users in real time. The second component of the system, named OscCalibrator, allows for the easy creation of multidimensional linear mappings between input and output parameters and the routing of OSC messages within a single modular design environment. We demonstrate the use of our system to flexibly create position-based interactions in a virtual studio environment.}, language = {en} } @misc{Herder1992, author = {Herder, Jens}, title = {Konzeption, Implementierung und Integration einer Komponente zur inkrementellen Bezeichner- und Operatoranalyse innerhalb des PSGs}, address = {Darmstadt}, organization = {Technische Hochschule Darmstadt}, year = {1992}, abstract = {Der Programmier System Generator - PSG - des Fachgebiets Praktische Informatik in Darmstadt erzeugt aus einer Sprachdefinition eine sprachspezifische Programmierumgebung. Diese besteht u. a. aus einem Editor, welcher syntaktische und semantische Fehler von Programmfragmenten, die nicht vollst{\"a}ndig sein m{\"u}ssen, erkennen kann. Dem Benutzer werden per Men{\"u} Fehlerkorrekturen angeboten. Neben der freien Texteingabe besteht die M{\"o}glichkeit, den Text nur mit Hilfe von Men{\"u}s zu verfeinern. Teil dieses Editors ist die Bezeichneranalyse. Sie dient als Hilfsmittel f{\"u}r den Benutzer, indem f{\"u}r jede Stelle eines Programmfragmentes die g{\"u}ltigen Bezeichner ausgegeben werden k{\"o}nnen. Die Kontextanalyse setzt die Berechnung auf den von der Bezeichneranalyse erzeugten Daten auf, um semantische Fehler zu erkennen. 
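
The pose derivation in the MarinosGeigerHerder2012 entry, detecting feet and deriving a user's position and orientation, can be sketched as follows, assuming the scanner already yields the two foot positions on the floor plane (the heading convention is ours):

    import math

    def pose_from_feet(left, right):
        """left, right: (x, y) foot positions on the floor plane.
        Returns the midpoint as the user position and a heading
        perpendicular to the left-to-right baseline."""
        mx = (left[0] + right[0]) / 2.0
        my = (left[1] + right[1]) / 2.0
        bx, by = right[0] - left[0], right[1] - left[1]
        heading = math.atan2(-bx, by)   # baseline rotated by -90 deg
        return (mx, my), heading

    pos, heading = pose_from_feet((0.0, 0.0), (0.4, 0.0))
    print(pos, math.degrees(heading))   # (0.2, 0.0) -90.0
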
Die bis zu dieser Arbeit verwendete Bezeichneranalyse im PSG unterst{\"u}tzt nur einfache Sprachkonzepte (z. B. Fortran und Pascal). Die G{\"u}ltigkeitskonzepte der Bezeichner von weiterentwickelten Sprachen (z. B. Modula-2, CHILL, Ada oder Pascal-XT) sind nicht vollst{\"a}ndig modellierbar. Wir stellen ein neues Konzept zur Definition und Berechnung der Bezeichneranalyse vor, das alle uns bekannten Sprachen mit statischer Typbindung unterst{\"u}tzt. Hierf{\"u}r haben wir die Sprache BIS - Bezeichneridentifikationssprache - definiert. Die Methode ist verwandt mit dem Zwischencode f{\"u}r geordnete Attributierte Grammatiken. F{\"u}r jeden Knoten des Abstrakten Syntaxbaumes wird mit Hilfe von BIS ein Code f{\"u}r eine abstrakte Maschine, welche die Bezeichneranalyse durchf{\"u}hrt, geschrieben. Im Gegensatz zu herk{\"o}mmlichen Methoden (verkettete Symboltabellen) wird f{\"u}r jeden Punkt innerhalb eines Programmes vor der Anfrage durch den Benutzer oder die Kontextanalyse die Menge der g{\"u}ltigen Bezeichner berechnet. Die Kosten f{\"u}r eine Anfrage sind dadurch minimal. Diese abstrakte Maschine teilt sich in zwei unabh{\"a}ngige Maschinen auf, zum einen in die S-Maschine, die die speziellen Operationen der Bezeichneranalyse durchf{\"u}hrt, und zum anderen in die G-Maschine, die den Datenfluss und die Auswertung steuert. Diese Aufteilung erm{\"o}glicht den Austausch der S-Maschine durch eine andere, welche neue Anwendungsgebiete erschliesst, z. B. die eines Praeprozessors. Die G-Maschine arbeitet inkrementell; es werden nur die Codeschablonen neu ausgewertet, deren geerbte Attribute sich ge{\"a}ndert haben. Dazu m{\"u}ssen die Daten, die in einer Codeschablone hinein- und hinausfliessen, abgelegt werden. Dies ergibt bei grossen Programmfragmenten eine immense Rechenzeiteinsparung auf Kosten des Speicherplatzes. Die Funktionsweise wird an einer kleinen Beispielsprache demonstriert, die zu Pascal {\"a}hnlich ist. Diese besitzt Konstrukte zum Import und Export von Daten und Datentypen zwischen Programmfragmenten. Im Prototyp kann die inkrementelle Arbeitsweise abgeschaltet werden und erm{\"o}glicht einen guten Vergleich der Verfahren.}, language = {de} } @incollection{HerderKronenwettLambertzetal.2006, author = {Herder, Jens and Kronenwett, Ralf and Lambertz, Simone and Kiefer, Georg and Freihoff, Johann}, title = {Interaktive Echtzeit-3D-Visualisierung Webbasierte Darstellung: Mobilisierung und Homing von Blutstammzellen}, series = {Mensch \& Computer 2006: Mensch und Computer im Strukturwandel}, booktitle = {Mensch \& Computer 2006: Mensch und Computer im Strukturwandel}, editor = {Heinecke, A. M. and Paul, H.}, publisher = {Oldenbourg Verlag}, address = {M{\"u}nchen}, pages = {405 -- 409}, year = {2006}, abstract = {Die interaktive Echtzeit 3D-Visualisierung Mobilisierung und Homing von Blutstammzellen wurde konzipiert, um ein sehr komplexes medizinisches Wissen mit den Mitteln der 3-dimensionalen Visualisierung in Echtzeit und des Internets sowie der daraus resultierenden Interaktivit{\"a}t aufzubereiten. Dies musste auf einer Ebene geschehen, die es hinterher auch jedem Nicht-Mediziner erlaubt, die grundlegenden biologischen und medizinischen Sachverhalte nachzuvollziehen. Das Resultat: Eine informative und didaktische Anwendung, aus einer Mischung von interaktiven 3D-Stationen und erkl{\"a}renden 3D-Animationen.
Diskutiert werden die Methodik der Konzeptionsphase und die Interaktionstechniken.}, subject = {Visualisierung}, language = {de} } @inproceedings{HerderVonolfenGriesertetal.2004, author = {Herder, Jens and Vonolfen, Wolfgang and Griesert, Arnfried and Heuer, Stefan and Hoffmann, Ansgar and H{\"o}ppner, Bernd}, title = {Interactive Virtual Set Applications for Post Production}, series = {University of Aizu 2004 - Seventh International Conference on Human and Computer}, booktitle = {University of Aizu 2004 - Seventh International Conference on Human and Computer}, address = {Aizu-Wakamatsu}, year = {2004}, abstract = {Virtual set environments for broadcasting become more sophisticated, and the visual quality improves. Realtime interaction and production-specific visualization implemented through a plugin mechanism enhance existing systems like the 3DK. This work presents the integration of the Intersense IS-900 SCT camera tracking and 3D interaction into the 3DK virtual studio environment. The main goal of this work is the design of a virtual studio environment for post productions, which includes video output as well as media streaming formats such as MPEG-4. The system allows high-quality offline rendering during post production and 3D interaction by the moderator during the recording.}, language = {en} } @misc{Herder2000, author = {Herder, Jens}, title = {Interactive Sound Spatialization - a Primer}, series = {MM News, University of Aizu Multimedia Center}, volume = {8}, journal = {MM News, University of Aizu Multimedia Center}, pages = {8 -- 12}, year = {2000}, abstract = {Sound spatialization is a technology which puts sound into the three dimensional space, so that it has a perceivable direction and distance. Interactive means mutually or reciprocally active. Interaction is when one action (e.g., user moves mouse) has direct or immediate influence on other actions (e.g., processing by a computer: graphics change in size). Based on this definition an introduction to sound reproduction using DVD and virtual environments is given and illustrated by applications (e.g., virtual concerts).}, language = {mul} } @article{GriesertWalczakHerder2003, author = {Griesert, Arnfried and Walczak, Oliver and Herder, Jens}, title = {Interactive Realtime Terrain Visualization for Virtual Set Applications}, series = {Journal of the 3D-Forum Society}, volume = {17}, journal = {Journal of the 3D-Forum Society}, number = {4}, pages = {20 -- 26}, year = {2003}, abstract = {Virtual set environments for broadcasting become more sophisticated, and the visual quality improves. Realtime interaction and production-specific visualization implemented through a plugin mechanism enhance existing systems like the virtual studio software 3DK. This work presents an algorithm which can dynamically manage textures of high resolution by prefetching them into memory depending on demand and mapping them onto a procedural mesh in realtime.
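
The texture management described in the GriesertWalczakHerder2003 entry amounts to keeping the tiles around the predicted camera position resident and evicting stale ones. A rough sketch under invented assumptions (tile keys, budget, and disk layer are ours):

    from collections import OrderedDict

    class TileCache:
        """Least-recently-used cache of high-resolution terrain tiles."""

        def __init__(self, capacity=64):
            self.capacity = capacity
            self.tiles = OrderedDict()      # (row, col) -> pixel data

        def _load(self, key):
            return b"..."                   # stands in for disk I/O

        def prefetch(self, cam_row, cam_col, radius=2):
            """Called per frame with the predicted camera tile."""
            for r in range(cam_row - radius, cam_row + radius + 1):
                for c in range(cam_col - radius, cam_col + radius + 1):
                    if (r, c) in self.tiles:
                        self.tiles.move_to_end((r, c))   # mark fresh
                    else:
                        self.tiles[(r, c)] = self._load((r, c))
                    while len(self.tiles) > self.capacity:
                        self.tiles.popitem(last=False)   # evict LRU

    cache = TileCache()
    cache.prefetch(10, 10)
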
The main goal of this work is the virtual representation of a flight over a landscape, as part of weather reports in virtual studios, including interaction by the moderator.}, language = {en} } @inproceedings{Herder2001, author = {Herder, Jens}, title = {Interactive Content Creation with Virtual Set Environments}, series = {Fourth International Conference on Human and Computer}, booktitle = {Fourth International Conference on Human and Computer}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, year = {2001}, abstract = {Digital broadcasting enables interactive TV, which presents new challenges for interactive content creation. Besides the technology for streaming and viewing, tools and systems are under development that extend traditional TV studios with virtual set environments. This presentation reviews current technology and describes the requirements for such systems. Interoperability over the production, streaming, and viewer levels requires open interfaces. As the technology allows more interaction, it becomes inherently difficult to control the quality of the viewer's experience.}, language = {en} } @article{Herder2001, author = {Herder, Jens}, title = {Interactive Content Creation with Virtual Set Environments}, series = {Journal of the 3D-Forum Society}, volume = {15}, journal = {Journal of the 3D-Forum Society}, number = {4}, pages = {53 -- 56}, year = {2001}, abstract = {Digital broadcasting enables interactive TV, which presents new challenges for interactive content creation. Besides the technology for streaming and viewing, tools and systems are under development that extend traditional TV studios with virtual set environments. This presentation reviews current technology and describes the requirements for such systems. Interoperability over the production, streaming, and viewer levels requires open interfaces. As the technology allows more interaction, it becomes inherently difficult to control the quality of the viewer's experience.}, language = {en} } @inproceedings{SchmittHerderBhalla1997, author = {Schmitt, Lothar M. and Herder, Jens and Bhalla, Subhash}, title = {Information Retrieval and Database Architecture for Conventional Japanese Character Dictionaries}, series = {Proceedings, Second International Conference on Cognitive Technology}, booktitle = {Proceedings, Second International Conference on Cognitive Technology}, editor = {Gorayska, Barbara and Nehaniv, Chrystopher L. and Marsh, Jonathon P.}, publisher = {IEEE}, address = {Los Alamitos}, isbn = {0-8186-8084-9}, pages = {200 -- 217}, year = {1997}, abstract = {The cycle of abstraction-reconstruction which occurs as a fundamental principle in the development of culture and in cognitive processes is described and analyzed. This approach leads to recognition of boundary conditions for and directions of probable development of cognitive tools. It is shown how the transition from a conventional Japanese-English character dictionary to a multi-dimensional language database is an instance of such an abstraction-reconstruction cycle. The different phases of the design of a multi-dimensional language database based upon different computer software technologies are properly placed in this cycle. The methods used include the use of UNIX software tools, classical database methods, as well as the use of search engines based upon full-text search in this process.
Several directions of application and extension for a multi-dimensional language database are discussed from the general point of view of an abstraction-reconstruction cycle.}, language = {en} } @inproceedings{LudwigBuechelHerderetal.2012, author = {Ludwig, Philipp and B{\"u}chel, Joachim and Herder, Jens and Vonolfen, Wolfgang}, title = {InEarGuide - A Navigation and Interaction Feedback System using In Ear Headphones for Virtual TV Studio Productions}, series = {9. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR}, booktitle = {9. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR}, address = {D{\"u}sseldorf}, year = {2012}, abstract = {This paper presents an approach to integrate non-visual user feedback in today's virtual tv studio productions. Since recent studies showed that systems providing vibro-tactile feedback are not sufficient for replacing the common visual feedback, we developed an audio-based solution using an in ear headphone system, enabling a talent to move, avoid and point to virtual objects in a blue or green box. The system consists of an optical head tracking system, a wireless in ear monitor system and a workstation, which performs all application and audio processing. Using head related transfer functions, the talent gets directional and distance cues. Past research showed that generating reflections of the sounds and simulating the acoustics of the virtual room helps the listener to conceive the acoustical feedback, so we included this technique as well. In a user study with 15 participants the performance of the system was evaluated.}, language = {en} } @inproceedings{VierjahnWoeldeckeGeigeretal.2009, author = {Vierjahn, Tom and W{\"o}ldecke, Bj{\"o}rn and Geiger, Christian and Herder, Jens}, title = {Improved Direction Signalization Technique Employing Vibrotactile Feedback}, series = {11th Virtual Reality International Conference, VRIC'2009}, booktitle = {11th Virtual Reality International Conference, VRIC'2009}, isbn = {2-9515730-8-1}, pages = {1 -- 8}, year = {2009}, abstract = {Vibrotactile feedback via body-worn vibrating belts is a common means of direction signalization - e.g. for navigational tasks. Consequently such feedback devices are used to guide blind or visually impaired people but can also be used to support other wayfinding tasks - for instance, guiding actors in virtual studio productions. Recent effort has been made to simplify this task by integrating vibrotactile feedback into virtual studio applications. In this work we evaluate the accuracy of an improved direction signalization technique, utilizing a body-worn vibrotactile belt with a limited number of tactors, and compare it to other work. The results from our user study indicate that it is possible to signalize different directions accurately, even with a small number of tactors spaced by 90°.}, language = {en} } @incollection{SuzukiNishojiHerder2000, author = {Suzuki, Kenji and Nishoji, Yuji and Herder, Jens}, title = {Implementation of Aural Attributes for Simulation of Room Effects in Virtual Environments}, series = {ACM Multimedia 2000}, booktitle = {ACM Multimedia 2000}, publisher = {ACM}, address = {Los Angeles}, pages = {439 -- 441}, year = {2000}, abstract = {The audio design for virtual environments includes simulation of acoustical room properties besides specifying sound sources and sinks and their behavior.
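
For the belt discussed in the VierjahnWoeldeckeGeigeretal.2009 entry, a direction between two of the four tactors can be signalled by driving the neighboring pair with interpolated intensities. A minimal sketch; the linear interpolation is our assumption, not necessarily the technique evaluated in the paper:

    TACTORS = [0.0, 90.0, 180.0, 270.0]   # tactor azimuths in degrees

    def tactor_intensities(direction_deg):
        """Distribute intensity over the two tactors nearest to the
        target direction; returns one intensity value per tactor."""
        d = direction_deg % 360.0
        i = int(d // 90.0) % 4
        t = (d - 90.0 * i) / 90.0
        out = [0.0, 0.0, 0.0, 0.0]
        out[i] = 1.0 - t
        out[(i + 1) % 4] = t
        return out

    print(tactor_intensities(45.0))   # [0.5, 0.5, 0.0, 0.0]
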
Virtual environments supporting room reverberation not only gain realism but also provide additional information to the user about surrounding space. Capturing the different sound properties of different spaces requires partitioning the space by the properties of aural spaces. We define soundscape and aural attributes as an application and multimedia content interface. Calculated data on an abstract level is sent to spatialization backends. Part of this research was the implementation of a device driver for the Roland Sound Space Processor. This device not only directionalizes sound sources, but also controls room effects like reverberation.}, language = {en} } @inproceedings{FlaskoPogschebaHerderetal.2011, author = {Flasko, Matthias and Pogscheba, Patrick and Herder, Jens and Vonolfen, Wolfgang}, title = {Heterogeneous binocular camera-tracking in a Virtual Studio}, series = {8. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR}, booktitle = {8. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR}, address = {Wedel}, year = {2011}, abstract = {This paper presents a tracking of parts of a human body in a virtual TV studio environment. The tracking is based on a depth camera and a HD studio camera and aims at a realistic interaction between the actor and the computer generated environment. Stereo calibration methods are used to match corresponding pixels of both cameras (HD color and depth image). Hence the images were rectified and column aligned. The disparity is used to correct the depth image pixel by pixel. This image registration results in row and column aligned images in which ghost regions remain in the depth image as a result of occlusion. Both images are used to generate foreground masks with chroma and depth keying. The color image is taken for skin color segmentation to determine and distinguish the actor's hands and face. In the depth image the flesh colored regions are used to determine their spatial position. The extracted positions are augmented by virtual objects. The scene is rendered correctly with virtual camera parameters which were calculated from the camera calibration parameters. Generated computer graphics with alpha value are combined with the HD color images. This compositing shows interaction with augmented objects for verification. The additional depth information results in changing the size of objects next to the hands when the actor moves around.}, language = {en} } @inproceedings{HerderNeiderKinuwaki2007, author = {Herder, Jens and Neider, Christian and Kinuwaki, Shinichi}, title = {HDR-based lighting estimation for virtual studio (TV) environments}, series = {10th International Conference on Human and Computer}, booktitle = {10th International Conference on Human and Computer}, address = {D{\"u}sseldorf, Aizu-Wakamatsu}, pages = {111 -- 117}, year = {2007}, abstract = {Two high dynamic range (HDR) environment maps based on video streams from fish-eye lens cameras are used for generating virtual lights in a virtual set renderer. The task of realistic virtual light setup of scenes using captured environment maps might be eased, while the visual quality improves. We discuss the light setting problem for virtual studio tv productions which have mixed scenes of real objects, actors, virtual objects and virtual backgrounds. Benefits of interactive HDR light control are that the real light in the studio does not have to be remodeled and the artistic impression by using the light in the studio is also captured.
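
The light estimation in the HerderNeiderKinuwaki2007 entry boils down to treating bright regions of the captured environment map as virtual light sources. A simplified sketch, assuming a latitude/longitude luminance map and keeping only the brightest texels:

    import math
    import numpy as np

    def estimate_lights(env_map, count=3):
        """env_map: HxW luminance array. Returns (direction,
        intensity) pairs; each direction is the unit vector of the
        texel's position on the latitude/longitude grid."""
        h, w = env_map.shape
        brightest = np.argsort(env_map, axis=None)[-count:]
        lights = []
        for idx in brightest:
            y, x = divmod(int(idx), w)
            theta = math.pi * (y + 0.5) / h        # polar angle
            phi = 2.0 * math.pi * (x + 0.5) / w    # azimuth
            direction = (math.sin(theta) * math.cos(phi),
                         math.cos(theta),
                         math.sin(theta) * math.sin(phi))
            lights.append((direction, float(env_map[y, x])))
        return lights

    demo = np.zeros((4, 8)); demo[1, 2] = 5.0
    print(estimate_lights(demo, count=1))
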
An analysis of system requirements identifies technical challenges. We discuss the properties of a prototype system including a test production.}, language = {en} } @incollection{HerderJaenschGarbe2006, author = {Herder, Jens and Jaensch, Kai and Garbe, Katharina}, title = {Haptische Interaktionen in Testumgebungen f{\"u}r Produktpr{\"a}sentation in Virtuellen Umgebungen}, series = {Augmented and Virtual Reality in der Produktentstehung}, volume = {188}, booktitle = {Augmented and Virtual Reality in der Produktentstehung}, editor = {Gausemeier, J{\"u}rgen and Grafe, Michael}, publisher = {Heinz Nixdorf Institut, Universit{\"a}t Paderborn}, address = {Paderborn}, isbn = {3-939350-07-9}, pages = {87 -- 99}, year = {2006}, abstract = {Durch den vermehrten Einsatz von multimedialen Technologien werden in der Marktforschung die M{\"o}glichkeiten der Durchf{\"u}hrung flexibler und kosteng{\"u}nstiger Studien gegeben. In sehr fr{\"u}hen Phasen des Innovationsprozesses als Teil der Marktforschung k{\"o}nnen durch Einsatz von Virtuellen Umgebungen die Markteinf{\"u}hrungskonzepte f{\"u}r neue Produkte getestet werden. Mittels Anwendungen der Virtuellen Realit{\"a}t k{\"o}nnen neue Produkte einschlie{\ss}lich des Marketingkonzeptes auch haptisch getestet werden, ohne dass dieses Produkt bereits physisch vorhanden sein muss. Informationen werden dem Benutzer in Virtuellen Umgebungen haupts{\"a}chlich visuell und erg{\"a}nzend auditiv {\"u}bermittelt. Verbreitete Benutzerschnittstellen sind Interaktionsger{\"a}te wie Stylus und Wand. Durch die haptische Wahrnehmung werden Informationen menschengerechter, effektiver und intuitiver wahrgenommen. Objekte in einer virtuellen Umgebung k{\"o}nnen durch den Einsatz haptischer Interaktionsger{\"a}te ertastet und erf{\"u}hlt werden und machen dadurch eine differenziertere Beurteilung und Einsch{\"a}tzung durch den Benutzer eben dieser Objekte m{\"o}glich. Der Fokus des vorliegenden Projektes liegt daher auf der interaktiven haptischen Produktpr{\"a}sentation in einer virtuellen Einkaufsumgebung, die in Online-Befragungen mit zus{\"a}tzlichen Werbefilmen eingebettet ist. Als Nebenprodukt wurde das Werkzeug Open Inventor um Knoten zur Modellierung von haptischen Szeneneigenschaften erweitert.}, language = {de} } @article{DaemenHerderKochetal.2017, author = {Daemen, Jeff and Herder, Jens and Koch, Cornelius and Ladwig, Philipp and Wiche, Roman and Wilgen, Kai}, title = {Halbautomatische Steuerung von Kamera und Bildmischer bei Live-{\"U}bertragungen}, series = {Fachzeitschrift f{\"u}r Fernsehen, Film und Elektronische Medien}, journal = {Fachzeitschrift f{\"u}r Fernsehen, Film und Elektronische Medien}, number = {11}, publisher = {Schiele \& Sch{\"o}n}, pages = {501 -- 505}, year = {2017}, abstract = {Live-Video-Broadcasting mit mehreren Kameras erfordert eine Vielzahl von Fachkenntnissen. Robotersysteme erm{\"o}glichen zwar die Automatisierung von g{\"a}ngigen und wiederholten Tracking-Aufnahmen, diese erlauben jedoch keine kurzfristigen Anpassungen aufgrund von unvorhersehbaren Ereignissen. In diesem Beitrag wird ein modulares, automatisiertes Kamerasteuerungs- und Bildschnitt-System eingef{\"u}hrt, das auf grundlegenden kinematografischen Regeln basiert. Die Positionen der Akteure werden durch ein markerloses Tracking-System bereitgestellt. Dar{\"u}ber hinaus werden Tonpegel der Lavaliermikrofone der Akteure zur Analyse der aktuellen Szene verwendet.
Ein Expertensystem ermittelt geeignete Kamerawinkel und entscheidet, wann von einer Kamera auf eine andere umgeschaltet werden soll. Eine Testproduktion wurde durchgef{\"u}hrt, um den entwickelten Prototyp in einem Live-Broadcast-Szenario zu beobachten und diente als Videodemonstration f{\"u}r eine Evaluierung.}, language = {de} } @inproceedings{BurgaDaemenDjuderijaetal.2013, author = {Burga, Jose and Daemen, Jeff and Djuderija, Sascha and Gnehr, Maren and Goossens, Lars and Hartz, Sven and Haufs-Brusberg, Peter and Herder, Jens and Ibrahim, Mohammed and Koop, Nikolas and Leske, Christophe and Meyer, Laurid and M{\"u}ller, Antje and Salgert, Bj{\"o}rn and Schroeder, Richard and Thiele, Simon}, title = {Four Metamorphosis States in a Distributed Virtual (TV) Studio: Human, Cyborg, Avatar, and Bot}, series = {10th International Conference on Visual Media Production (CVMP 2013), London}, booktitle = {10th International Conference on Visual Media Production (CVMP 2013), London}, address = {London}, year = {2013}, abstract = {The major challenge in virtual studio technology is the interaction between the actor and virtual objects. Within a distributed live production, two locally separated markerless tracking systems were used simultaneously alongside a virtual studio. The production was based on a fully tracked actor, a cyborg (half actor, half graphics), an avatar, and a bot. All participants could interact and throw a virtual disc. This setup is compared and mapped to Milgram's continuum and technical challenges are described.}, language = {en} } @inproceedings{YamazakiHerder2000, author = {Yamazaki, Yasuhiro and Herder, Jens}, title = {Exploring Spatial Audio Conferencing Functionality in Multiuser Virtual Environments}, series = {The Third International Conference on Collaborative Virtual Environments}, booktitle = {The Third International Conference on Collaborative Virtual Environments}, publisher = {ACM}, address = {San Francisco}, pages = {207 -- 208}, year = {2000}, abstract = {A chatspace was developed that allows conversation with 3D sound using networked streaming in a shared virtual environment. The system provides an interface to advanced audio features, such as a "whisper function" for conveying a confided audio stream. This study explores the use of spatial audio to enhance a user's experience in multiuser virtual environments.}, language = {en} } @misc{JaroschHerderLangmann2022, author = {Jarosch, Monika and Herder, Jens and Langmann, Mathias}, title = {Entwicklung einer AR-Applikation zur kosteneffektiven volumetrischen Erfassung von Baugruben}, series = {gis.Science}, volume = {2022}, journal = {gis.Science}, number = {2}, issn = {2698-4571}, pages = {75 -- 83}, year = {2022}, abstract = {Die volumetrische Erfassung von Aush{\"u}ben auf Baustellen ist ein kostenrelevanter Faktor und wird auch heute im t{\"a}glichen Baustellenbetrieb oft noch in manueller Detailarbeit durchgef{\"u}hrt. Kosteng{\"u}nstige Sensoren zur Tiefenerfassung erm{\"o}glichen die halbautomatische Erfassung von Baugruben. Augmented Reality (AR) kann f{\"u}r diesen Prozess das n{\"o}tige Feedback liefern. Vorgestellt wird ein Prototyp, bestehend aus einem Tablet mit integrierter Kamera und einem Lidar-Scanner. Es wird die Erfassung des Volumens bez{\"u}glich Nutzbarkeit und Genauigkeit mit Einsatz von AR getestet und evaluiert. Zur Bestimmung des Volumens wird unter Verwendung von Strahlen mit Unterst{\"u}tzung einer Grafik-Engine ein Algorithmus entwickelt.
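
A numeric toy version of such a ray-based volume computation (not the published algorithm): vertical sample rays over a grid, where rays that hit no surface, i.e. holes in an unclosed volume, are simply skipped:

    def pit_volume(depth_at, x0, x1, y0, y1, step=0.1):
        """depth_at(x, y): depth in meters below the rim plane, or
        None where the scan has no surface. Grid cells whose center
        ray misses contribute nothing, so holes stay harmless."""
        nx = int(round((x1 - x0) / step))
        ny = int(round((y1 - y0) / step))
        cell = step * step
        volume = 0.0
        for i in range(nx):
            for j in range(ny):
                d = depth_at(x0 + (i + 0.5) * step,
                             y0 + (j + 0.5) * step)
                if d is not None and d > 0.0:
                    volume += d * cell
        return volume

    # A 2 m x 2 m pit of constant 1 m depth -> ~4.0 cubic meters:
    print(pit_volume(lambda x, y: 1.0, 0.0, 2.0, 0.0, 2.0))
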
Der Algorithmus ist robust gegen nicht vollst{\"a}ndig geschlossene Volumen. Die Bedienung, {\"U}berpr{\"u}fung und Visualisierung finden durch praktischen Einsatz von AR statt.}, language = {de} } @inproceedings{HerderCohen1997, author = {Herder, Jens and Cohen, Michael}, title = {Enhancing Perspicuity of Objects in Virtual Reality Environments}, series = {Proceedings, Second International Conference on Cognitive Technology}, booktitle = {Proceedings, Second International Conference on Cognitive Technology}, editor = {Gorayska, Barbara and Nehaniv, Chrystopher L. and Marsh, Jonathon P.}, publisher = {IEEE}, address = {Los Alamitos}, isbn = {0-8186-8084-9}, pages = {228 -- 237}, year = {1997}, abstract = {In an information-rich Virtual Reality (VR) environment, the user is immersed in a world containing many objects providing that information. Given the finite computational resources of any computer system, optimization is required to ensure that the most important information is presented to the user as clearly as possible and in a timely fashion. In particular, what is desired are means whereby the perspicuity of an object may be enhanced when appropriate. An object becomes more perspicuous when the information it provides to the user becomes more readily apparent. Additionally, if a particular object provides high-priority information, it would be advantageous to make that object obtrusive as well as highly perspicuous. An object becomes more obtrusive if it draws attention to itself (or equivalently, if it is hard to ignore). This paper describes a technique whereby objects may dynamically adapt their representation in a user's environment according to a dynamic priority evaluation of the information each object provides. The three components of our approach are: - an information manager that evaluates object information priority, - an enhancement manager that tabulates rendering features associated with increasing object perspicuity and obtrusion as a function of priority, and - a resource manager that assigns available object rendering resources according to features indicated by the enhancement manager for the priority set for each object by the information manager. We consider resources like visual space (pixels), sound spatialization channels (mixels), MIDI/audio channels, and processing power, and discuss our approach applied to different applications. Assigned object rendering features are implemented locally at the object level (e.g., object facing the user using the billboard node in VRML 2.0) or globally, using helper applications (e.g., active spotlights, semi-automatic cameras).}, language = {en} } @inproceedings{RyskeldievOchiaiCohenetal.2018, author = {Ryskeldiev, Bektur and Ochiai, Yoichi and Cohen, Michael and Herder, Jens}, title = {Distributed Metaverse: Creating Decentralized Blockchain-based Model for Peer-to-peer Sharing of Virtual Spaces for Mixed Reality Applications}, series = {Proceedings of the 9th Augmented Human International Conference}, booktitle = {Proceedings of the 9th Augmented Human International Conference}, publisher = {ACM}, isbn = {978-1-4503-5415-8}, doi = {10.1145/3174910.3174952}, pages = {7 -- 9}, year = {2018}, abstract = {Mixed reality telepresence is becoming an increasingly popular form of interaction in social and collaborative applications. We are interested in how created virtual spaces can be archived, mapped, shared, and reused among different applications.
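
The three managers of the HerderCohen1997 entry can be condensed into a priority-driven assignment of a limited channel budget. A hedged sketch; the feature table, budget, and data shapes are invented:

    import heapq

    # Invented enhancement table: information priority -> rendering
    # features requested for an object at that priority.
    FEATURES_BY_PRIORITY = {2: ("spatialized_audio", "billboard"),
                            1: ("spatialized_audio",),
                            0: ()}

    def assign(objects, channels=4):
        """objects: (name, priority) pairs. Grants features to the
        highest-priority objects until the channel budget is spent."""
        granted = {}
        queue = [(-prio, name) for name, prio in objects]
        heapq.heapify(queue)
        while queue:
            neg_prio, name = heapq.heappop(queue)
            feats = FEATURES_BY_PRIORITY.get(-neg_prio, ())
            take = min(channels, len(feats))
            granted[name] = feats[:take]
            channels -= take
        return granted

    print(assign([("alarm", 2), ("door", 1), ("clock", 0)]))
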
Therefore, we propose a decentralized blockchain-based peer-to-peer model of distribution, with virtual spaces represented as blocks. We demonstrate the integration of our system in a collaborative mixed reality application and discuss the benefits and limitations of our approach.}, language = {en} } @article{HonnoSuzukiHerder2000, author = {Honno, Kuniaki and Suzuki, Kenji and Herder, Jens}, title = {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array}, series = {Journal of the 3D-Forum Society}, volume = {14}, journal = {Journal of the 3D-Forum Society}, number = {4}, pages = {146 -- 151}, year = {2000}, abstract = {The Pioneer Sound Field Controller (PSFC), a loudspeaker array system, features realtime configuration of an entire sound field, including sound source direction, virtual distance, and context of simulated environment (room characteristics: room size and liveness) for each of two sound sources. In the PSFC system, there is no native parameter to specify the distance between the sound source and sound sink (listener) and also no function to control it directly. This paper suggests a method to control virtual distance using basic parameters: volume, room size and liveness. The implementation of distance cues is an important aspect of 3D sounds. Virtual environments supporting room effects like reverberation not only gain realism but also provide additional information to users about surrounding space. The context switch of different aural attributes is done by using an API of the Sound Spatialization Framework. Therefore, when the sound sink moves through two rooms, like a small bathroom and a large living room, the context of the sink switches and a different sound is obtained.}, language = {en} } @inproceedings{HonnoSuzukiHerder2000, author = {Honno, Kuniaki and Suzuki, Kenji and Herder, Jens}, title = {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array}, series = {Third International Conference on Human and Computer}, booktitle = {Third International Conference on Human and Computer}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, pages = {71 -- 76}, year = {2000}, abstract = {The Pioneer Sound Field Controller (PSFC), a loudspeaker array system, features realtime configuration of an entire sound field, including sound source direction, virtual distance, and context of simulated environment (room characteristics: room size and liveness) for each of two sound sources. In the PSFC system, there is no native parameter to specify the distance between the sound source and sound sink (listener) and also no function to control it directly. This paper suggests a method to control virtual distance using basic parameters: volume, room size and liveness. The implementation of distance cues is an important aspect of 3D sounds. Virtual environments supporting room effects like reverberation not only gain realism but also provide additional information to users about surrounding space. The context switch of different aural attributes is done by using an API of the Sound Spatialization Framework.
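
The substitution these two entries describe, controlling apparent distance through volume, room size, and liveness because the PSFC has no native distance parameter, can be sketched with invented constants:

    import math

    def distance_to_parameters(distance_m, room_size=10.0):
        """Approximate a virtual distance with the PSFC's basic
        parameters (curves and constants invented for illustration):
        the direct sound level falls off with distance while the
        simulated room liveness raises the reverberant share."""
        gain = 1.0 / max(distance_m, 1.0)         # inverse-distance law
        wet = min(1.0, distance_m / room_size)    # reverberant ratio
        return {"volume_db": 20.0 * math.log10(gain),
                "liveness": 0.2 + 0.6 * wet,
                "room_size": room_size}

    print(distance_to_parameters(5.0))  # volume ~ -14 dB, liveness 0.5
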
Therefore, when the sound sink moves through two rooms, like a small bathroom and a large living room, the context of the sink switches and a different sound is obtained.}, language = {en} } @incollection{HerderGeigerLehmannetal.2009, author = {Herder, Jens and Geiger, Christian and Lehmann, Anke and Vierjahn, Tom and W{\"o}ldecke, Bj{\"o}rn}, title = {Designstrategien f{\"u}r den Einsatz von vibrotaktilem Feedback in Mixed Reality Anwendungen}, series = {Augmented \& Virtual Reality in der Produktentstehung}, volume = {232}, booktitle = {Augmented \& Virtual Reality in der Produktentstehung}, editor = {Gausemeier, J{\"u}rgen and Grafe, Michael}, publisher = {Heinz Nixdorf Institut, Universit{\"a}t Paderborn}, address = {Paderborn}, isbn = {978-3-939350-71-2}, pages = {225 -- 240}, year = {2009}, language = {de} } @inproceedings{GeigerHerderGoebeletal.2010, author = {Geiger, Christian and Herder, Jens and G{\"o}bel, Sebastian and Heinze, Christin and Marinos, Dionysios}, title = {Design and Virtual Studio Presentation of a Traditional Archery Simulator}, series = {Proceedings of the Entertainment Interfaces Track 2010 at Interaktive Kulturen, Duisburg, Germany, September 12-15, 2010}, booktitle = {Proceedings of the Entertainment Interfaces Track 2010 at Interaktive Kulturen, Duisburg, Germany, September 12-15, 2010}, address = {Duisburg}, pages = {37 -- 44}, year = {2010}, abstract = {In this paper we describe the design of a virtual reality simulator for traditional intuitive archery. Traditional archers aim without a target figure. Good shooting results require an excellent body-eye coordination that allows the user to perform identical movements when drawing the bow. Our simulator provides a virtual archery experience and supports the user to learn and practice the motion sequence of traditional archery in a virtual environment. We use an infrared tracking system to capture the user's movements in order to correct them. To provide realistic haptic feedback, a real bow is used as the interaction device. Our system provides a believable user experience and supports the user to learn how to shoot in the traditional way. Following a user-centered iterative design approach we developed a number of prototypes and evaluated them for refinement in subsequent iteration cycles. For illustration purposes we created a short video clip in our virtual studio about this project that presents the main ideas in an informative yet entertaining way.}, language = {en} } @article{CohenHerderLMartens1999, author = {Cohen, Michael and Herder, Jens and Martens, William L.}, title = {Cyberspatial Audio Technology}, series = {The Journal of the Acoustical Society of Japan (E)}, volume = {20}, journal = {The Journal of the Acoustical Society of Japan (E)}, number = {6}, doi = {10.1250/ast.20.389}, pages = {389 -- 395}, year = {1999}, abstract = {Cyberspatial audio applications are distinguished from the broad range of spatial audio applications in a number of important ways that help to focus this review. Most significant is that cyberspatial audio is most often designed to be responsive to user inputs. In contrast to non-interactive auditory displays, cyberspatial auditory displays typically allow active exploration of the virtual environment in which users find themselves. Thus, at least some portion of the audio presented in a cyberspatial environment must be selected, processed, or otherwise rendered with minimum delay relative to user input.
Besides the technological demands associated with realtime delivery of spatialized sound, the type and quality of auditory experiences supported are also very different from those associated with displays that support stationary sound localization.}, language = {en} } @inproceedings{BeckerHerder2012, author = {Becker, Thomas and Herder, Jens}, title = {Cost effective tangibles using fiducials for infrared multi-touch frames}, series = {15th International Conference on Human and Computer}, booktitle = {15th International Conference on Human and Computer}, address = {Hamamatsu/Aizu-Wakamatsu/Duesseldorf}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-16011}, pages = {7}, year = {2012}, abstract = {The recent emergence of multi-touch sensitive displays enables the use of tangibles on multi-touch screens. There are several widespread and/or sophisticated solutions to fulfill this need, but they seem to have some flaws. One popular system at the time of writing is an overlay frame that can be placed on a normal display with the corresponding size. The frame creates a grid with infrared light emitting diodes. The disruption of this grid can be detected and messages with the positions are sent via USB to a connected computer. This system is quite robust against ambient light and also fast to calibrate. Unfortunately it is not created with the recognition of tangibles in mind, and printed patterns cannot be resolved. This article summarizes an attempt to create fiducials that are recognized by an infrared multi-touch frame as fingers. Those false fingers are checked by software for known patterns. Once a known pattern (= fiducial) has been recognized, its position and orientation are sent together with the finger positions to the interactive software. The usability is tested with an example application where tangibles and finger touches are used in combination.}, language = {en} } @inproceedings{Herder1997, author = {Herder, Jens}, title = {Cooperative Tools for Teaching : an Impact of a Network Environment}, series = {Annual Report of the Information Systems and Technology Center, University of Aizu, October 1997}, booktitle = {Annual Report of the Information Systems and Technology Center, University of Aizu, October 1997}, address = {Aizu}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-827}, pages = {3 -- 8}, year = {1997}, abstract = {Education at the University of Aizu is focussed upon computer science. Besides being the subject matter of many courses, however, the computer also plays a vital role in the educational process itself, both in the distribution of instructional media, and in providing students with valuable practical experience. All students have unlimited access (24-hours-a-day) to individual networked workstations, most of which are multimedia-capable (even video capture is possible in two exercise rooms). Without software and content tailored for computer-aided instruction, the hardware becomes an expensive decoration. In any case, there is a need to better educate the instructors and students in the use of the equipment. In the interest of facilitating effective, collaborative use of network-based computers in teaching, this article explores the impact that a network environment can have on such activities. First, as a general overview, and to examine the motivation for the use of a network environment in teaching, this article reviews a range of different styles of collaboration.
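
The pattern check summarized in the BeckerHerder2012 entry needs a signature for a set of reported touch points that survives rotation and translation; sorted pairwise distances are one such signature. A toy matcher, with patterns and tolerance invented:

    import itertools
    import math

    # Known fiducials: name -> sorted pairwise peg distances in mm.
    KNOWN = {"triangle-small": [30.0, 30.0, 30.0],
             "l-shape": [40.0, 40.0, 56.6]}

    def signature(points):
        """Rotation- and translation-invariant point-set signature."""
        return sorted(math.dist(p, q)
                      for p, q in itertools.combinations(points, 2))

    def identify(points, tolerance=2.0):
        sig = signature(points)
        for name, ref in KNOWN.items():
            if len(ref) == len(sig) and all(
                    abs(a - b) <= tolerance for a, b in zip(sig, ref)):
                return name
        return None

    print(identify([(0, 0), (40, 0), (0, 40)]))   # -> 'l-shape'
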
Then the article shows what kind of tools are available for use, within the context of what has come to be called Computer-Supported Cooperative Work (CSCW).}, language = {en} } @inproceedings{BallesterRipollHerderLadwigetal.2016, author = {Ballester Ripoll, Marina and Herder, Jens and Ladwig, Philipp and Vermeegen, Kai}, title = {Comparison of two Gesture Recognition Sensors for Virtual TV Studios}, series = {GI-VRAR, Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 13. Workshop der GI-Fachgruppe VR/AR,}, booktitle = {GI-VRAR, Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 13. Workshop der GI-Fachgruppe VR/AR,}, editor = {Pfeiffer, Thies and Fr{\"o}hlich, Julia and Kruse, Rolf}, publisher = {Shaker Verlag}, address = {Herzogenrath}, isbn = {978-3-8440-4718-9}, year = {2016}, abstract = {In order to improve the interactivity between users and computers, recent technologies focus on incorporating gesture recognition into interactive systems. The aim of this article is to evaluate the effectiveness of using a Myo control armband and the Kinect 2 for recognition of gestures in order to interact with virtual objects in a weather report scenario. The Myo armband has an inertial measurement unit and is able to read electrical activity produced by skeletal muscles, which can be recognized as gestures trained by machine learning. A Kinect sensor was used to build up a dataset which contains motion recordings of 8 different gestures and was also built up by a gesture training machine learning algorithm. Both input methods, the Kinect 2 and the Myo armband, were evaluated with the same interaction patterns in a user study, which allows a direct comparison and reveals benefits and limits of each technique.}, language = {en} } @inproceedings{Herder2000, author = {Herder, Jens}, title = {Challenges of Virtual Sets: From Broadcasting to Interactive Media}, series = {Seventh International Workshop on Human}, booktitle = {Seventh International Workshop on Human}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, pages = {13 -- 17}, year = {2000}, abstract = {Virtual sets have evolved from computer-generated, prerendered 2D backgrounds to realtime, responsive 3D computer graphics and are nowadays standard repertoire of broadcasting divisions. The graphics, which are combined with the real video feed, are becoming more sophisticated, realistic looking, and more responsive. We will look at the recent developments and suggest further developments like the integration of spatial audio into the studio production and the generation of interactive media streams. Educational institutes recognize the demands of the rising media industry and establish new courses on media technology, as at the Duesseldorf University of Applied Sciences.}, language = {en} } @inproceedings{DeppeNemitzHerder2018, author = {Deppe, Robert and Nemitz, Oliver and Herder, Jens}, title = {Augmented reality for supporting manual non-destructive ultrasonic testing of metal pipes and plates}, series = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15. Workshop der GI-Fachgruppe VR/AR}, booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15.
Workshop der GI-Fachgruppe VR/AR}, editor = {Herder, Jens and Geiger, Christian and D{\"o}rner, Ralf and Grimm, Paul}, publisher = {Shaker Verlag}, address = {Herzogenrath}, isbn = {978-3-8440-6215-1}, doi = {10.2370/9783844062151}, pages = {45 -- 52}, year = {2018}, abstract = {We describe an application of augmented reality technology for non-destructive testing of products in the metal industry. The prototype is created with hardware and software that are usually employed in the gaming industry, and delivers positions for creating ultrasonic material scans (C-scans). Using a stereo camera in combination with an HMD enables realtime visualisation of the probe's path, as well as the setting of virtual markers on the specimen. As part of the implementation, the downhill simplex optimization algorithm is implemented to fit the specimen to a cloud of recorded surface points. The accuracy is statistically tested and evaluated, with the result that the tracking system is accurate to about 1-2 millimeters in well set-up conditions. This paper is of interest not only for research institutes of the metal industry, but also for any areas of work in which enhancement with augmented reality is possible and precise tracking is necessary.}, language = {en} } @article{KuniiHerderMyszkowskietal.1994, author = {Kunii, Tosiyasu L. and Herder, Jens and Myszkowski, Karol and Okunev, Oleg and Okuneva, Galina and Ibusuki, Masumi}, title = {Articulation Simulation for an Intelligent Dental Care System}, series = {Displays}, volume = {15}, journal = {Displays}, number = {3}, pages = {181 -- 188}, year = {1994}, abstract = {CAD/CAM techniques are used increasingly in dentistry for design and fabrication of teeth restorations. An important issue is preserving occlusal contacts of teeth after restoration. Traditional techniques based on the use of casts with mechanical articulators require manual adjustment of occlusal surface, which becomes impractical when hard restoration materials like porcelain are used; they are also time- and labor-consuming. Most existing computer systems ignore completely such an articulation check, or perform the check at the level of a tooth and its immediate neighbors. We present a new mathematical model and a related user interface for global articulation simulation, developed for the Intelligent Dental Care System project. The aim of the simulation is elimination of the use of mechanical articulators and manual adjustment in the process of designing dental restorations and articulation diagnostics. The mathematical model is based upon differential topological modeling of the jaws considered as a mechanical system. The user interface exploits metaphors that are familiar to dentists from everyday practice. A new input device designed specifically for use with articulation simulation is proposed.}, language = {en} } @inproceedings{RyskeldievCohenHerder2017, author = {Ryskeldiev, Bektur and Cohen, Michael and Herder, Jens}, title = {Applying rotational tracking and photospherical imagery to immersive mobile telepresence and live video streaming groupware}, series = {Proceeding SA '17 SIGGRAPH Asia 2017 Mobile Graphics \& Interactive Applications, Article No. 5}, booktitle = {Proceeding SA '17 SIGGRAPH Asia 2017 Mobile Graphics \& Interactive Applications, Article No.
5}, publisher = {ACM}, address = {New York}, isbn = {978-1-4503-5410-3}, doi = {10.1145/3132787.3132813}, pages = {2}, year = {2017}, abstract = {Mobile live video streaming is becoming an increasingly popular form of interaction both in social media and remote collaboration scenarios. However, in most cases the streamed video does not take mobile devices' spatial data into account (e.g., the viewers do not know the spatial orientation of a streamer), or use such data only in specific scenarios (e.g., to navigate around a spherical video stream).}, language = {en} } @inproceedings{Herder2001, author = {Herder, Jens}, title = {Applications of Spatial Auditory Displays in the Context of Art and Music}, series = {Human Supervision and Control in Engineering and Music}, booktitle = {Human Supervision and Control in Engineering and Music}, publisher = {Universit{\"a}t Kassel}, address = {Kassel}, year = {2001}, language = {en} } @article{AmanoMatsushitaYanagawaetal.1998, author = {Amano, Katsumi and Matsushita, Fumio and Yanagawa, Hirofumi and Cohen, Michael and Herder, Jens and Martens, William and Koba, Yoshiharu and Tohyama, Mikio}, title = {A Virtual Reality Sound System Using Room-Related Transfer Functions Delivered Through a Multispeaker Array: the PSFC at the University of Aizu Multimedia Center}, series = {TVRSJ}, volume = {3}, journal = {TVRSJ}, number = {1}, publisher = {J-STAGE}, doi = {10.18974/tvrsj.3.1_1}, pages = {1 -- 12}, year = {1998}, abstract = {The PSFC, or Pioneer Sound Field Controller, is a DSP-driven hemispherical loudspeaker array, installed at the University of Aizu Multimedia Center. The PSFC features realtime manipulation of the primary components of sound spatialization for each of two audio sources located in a virtual environment, including the content (apparent direction and distance) and context (room characteristics: reverberation level, room size and liveness). In an alternate mode, it can also direct the destination of the two separate input signals across 14 loudspeakers, manipulating the direction of the virtual sound sources with no control over apparent distance other than that afforded by source loudness (including no simulated environmental reflections or reverberation). The PSFC speaker dome is about 10 m in diameter, accommodating about fifty simultaneous users, including about twenty users comfortably standing or sitting near its ``sweet spot,'' the area in which the illusions of sound spatialization are most vivid. Collocated with a large screen rear-projection stereographic display, the PSFC is intended for advanced multimedia and virtual reality applications.}, language = {en} } @incollection{HerderMyszkowskiKuniietal.1996, author = {Herder, Jens and Myszkowski, Karol and Kunii, Tosiyasu L. and Ibusuki, Masumi}, title = {A Virtual Reality Interface to an Intelligent Dental Care System}, series = {Medicine Meets Virtual Reality 4}, booktitle = {Medicine Meets Virtual Reality 4}, editor = {Weghorst, Suzanne J. and Sieburg, Hans B. 
and Morgan, Karen S.}, publisher = {IOS Press}, address = {Amsterdam}, pages = {17 -- 20}, year = {1996}, language = {en} } @inproceedings{IshikawaHiroseHerder1998, author = {Ishikawa, Kimitaka and Hirose, Minefumi and Herder, Jens}, title = {A Sound Spatialization Server for a Speaker Array as an Integrated Part of a Virtual Environment}, series = {IEEE YUFORIC Germany 98}, booktitle = {IEEE YUFORIC Germany 98}, publisher = {IEEE}, address = {Stuttgart}, year = {1998}, abstract = {Spatial sound plays an important role in virtual reality environments, allowing orientation in space, giving a feeling of space, focusing the user on events in the scene, and substituting missing feedback cues (e.g., force feedback). The sound spatialization framework of the University of Aizu, which supports a number of spatialization backends, has been extended to include a sound spatialization server for a multichannel loudspeaker array (Pioneer Sound Field Control System). Our goal is that the spatialization server allows easy integration into virtual environments. Modeling of distance cues, which are essential for full immersion, is discussed. Furthermore, the integration of this prototype into different applications allowed us to reveal the advantages and problems of spatial sound for virtual reality environments.}, language = {en} } @phdthesis{Herder1999, author = {Herder, Jens}, title = {A Sound Spatialization Resource Management Framework}, publisher = {University of Tsukuba}, address = {Tsukuba}, organization = {University of Tsukuba}, year = {1999}, abstract = {In a virtual reality environment, users are immersed in a scene with objects which might produce sound. The responsibility of a VR environment is to present these objects, but a practical system has only limited resources, including spatialization channels (mixels), MIDI/audio channels, and processing power. A sound spatialization resource manager, introduced in this thesis, controls sound resources and optimizes fidelity (presence) under given conditions, using a priority scheme based on psychoacoustics. Objects which are spatially close together can be coalesced by a novel clustering algorithm, which considers listener localization errors. Application programmers and VR scene designers are freed from the burden of assigning mixels and predicting sound source locations. The framework includes an abstract interface for sound spatialization backends, an API for the VR environments, and multimedia authoring tools.}, language = {en} } @article{HesseKoenigLogietal.1993, author = {Hesse, Jan and K{\"o}nig, Rainer and Logi, Filippo and Herder, Jens}, title = {A Prototype of an Interface Builder for the Common Lisp Interface Manager - CLIB}, series = {ACM Sigplan Notices}, volume = {28}, journal = {ACM Sigplan Notices}, number = {8}, publisher = {Forschungszentrum Informatik (FZI), Technical Expert Systems and Robotics}, doi = {10.1145/163114.163116}, pages = {19 -- 28}, year = {1993}, abstract = {The Common Lisp Interface Manager (CLIM) is used to develop graphical user interfaces for Lisp-based applications. With the prototype of the CLIM Interface Builder (CLIB), the programmer can generate code for CLIM interactively. The development process becomes faster and less prone to errors. With this new tool, interactive rapid prototyping reduces the costs of a specification phase.
Here we present the concept and the first results of the CLIB prototype.}, language = {en} } @inproceedings{PaulHerder2018, author = {Paul, Felix and Herder, Jens}, title = {A model-based filtering approach for real-time human motion data}, series = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15. Workshop der GI-Fachgruppe VR/AR}, booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15. Workshop der GI-Fachgruppe VR/AR}, editor = {Herder, Jens and Geiger, Christian and D{\"o}rner, Ralf and Grimm, Paul}, publisher = {Shaker Verlag}, address = {Herzogenrath}, isbn = {978-3-8440-6215-1}, doi = {10.2370/9783844062151}, pages = {37 -- 44}, year = {2018}, abstract = {Acquiring human motion data from video images plays an important role in the field of computer vision. Ground-truth tracking systems require markers to create high-quality motion data, but in many applications it is desirable to work without markers. In recent years, affordable hardware for markerless tracking systems became available at the consumer level. Efficient depth camera systems based on Time-of-Flight sensors and structured light have made it possible to record motion data in real time. However, the gap in quality between marker-based and markerless systems remains large. The error sources of a markerless motion tracking pipeline are discussed, and a model-based filter is proposed which adapts depending on spatial location. The proposed method is shown to be more robust and accurate than the unfiltered data stream and can be used to visually enhance the presence of an actor within a virtual environment in live broadcast productions.}, language = {en} } @inproceedings{VermeegenHerder2018, author = {Vermeegen, Kai and Herder, Jens}, title = {A Lighthouse-based Camera Tracking System for Professional Virtual Studios}, series = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15. Workshop der GI-Fachgruppe VR/AR}, booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15. Workshop der GI-Fachgruppe VR/AR}, editor = {Herder, Jens and Geiger, Christian and D{\"o}rner, Ralf and Grimm, Paul}, publisher = {Shaker Verlag}, address = {Herzogenrath}, isbn = {978-3-8440-6215-1}, doi = {10.2370/9783844062151}, pages = {19 -- 26}, year = {2018}, abstract = {This article describes the possibilities and problems that occur when using the SteamVR tracking 2.0 system as a camera tracking system in a virtual studio and explains an approach to implementation and calibration within a professional studio environment. The tracking system allows for cost-effective deployment. Relevant application fields also include mixed reality recording and the streaming of AR and VR experiences.}, language = {en} } @inproceedings{MartensHerderShiba1999, author = {Martens, William L. 
and Herder, Jens and Shiba, Yoshiki}, title = {A filtering model for efficient rendering of the spatial image of an occluded virtual sound source}, series = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, booktitle = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, publisher = {Acoustical Society of America, European Acoustics Association}, address = {Berlin}, year = {1999}, abstract = {Rendering realistic spatial sound imagery for complex virtual environments must take into account the effects of obstructions such as reflectors and occluders. It is relatively well understood how to calculate the acoustical consequence that would be observed at a given observation point when an acoustically opaque object occludes a sound source. But the interference patterns generated by occluders of various geometries and orientations relative to the virtual source and receiver are computationally expensive to evaluate if accurate results are required. In many applications, however, it is sufficient to create a spatial image that is recognizable by the human listener as the sound of an occluded source. In the interest of improving audio rendering efficiency, a simplified filtering model was developed and its audio output submitted to psychophysical evaluation. Two perceptually salient components of occluder acoustics were identified that could be directly related to the geometry and orientation of a simple occluder. Actual occluder impulse responses measured in an anechoic chamber resembled the responses of a model incorporating only a variable-duration delay line and a low-pass filter with variable cutoff frequency.}, language = {en} } @article{NovotnyJaenschHerder2005, author = {Novotny, Tom and Jaensch, Kai and Herder, Jens}, title = {A Database Driven and Virtual Reality supported Environment for Marketing Studies}, series = {Journal of the 3D-Forum Society}, volume = {19}, journal = {Journal of the 3D-Forum Society}, number = {4}, pages = {95 -- 101}, year = {2005}, abstract = {In today's market research, multimodal technologies are significant tools for performing flexible and price-efficient studies not only on consumer products but also on consumer goods. Current appraisal mechanisms, in combination with applied computer graphics, can improve the assessment of a product's launch in the very early design phase of an innovation process. The combination of online questionnaires, Virtual Reality (VR) applications, and a database management system offers a powerful tool for letting consumers judge products as well as innovative goods even before a single article has been produced. In this paper we present an approach to consumer goods studies consisting of conventional as well as interactive VR product presentations and online questionnaires, based on a bidirectional database management solution that configures and manages numerous studies, virtual sets, goods, and participants in an effective way to support the evaluation of the received data. Non-programmers can create their test environment, including a VR scenario, very quickly and with little effort. 
With the extensive knowledge gained about consumer goods, marketing instruments can be defined to shorten and improve the rollout process in the early product stages.}, language = {en} } @article{HerderYamazaki2000, author = {Herder, Jens and Yamazaki, Yasuhiro}, title = {A Chatspace Deploying Spatial Audio for Enhanced Conferencing}, series = {Journal of the 3D-Forum Society}, volume = {15}, journal = {Journal of the 3D-Forum Society}, number = {1}, year = {2000}, language = {en} } @inproceedings{HerderYamazaki2000, author = {Herder, Jens and Yamazaki, Yasuhiro}, title = {A Chatspace Deploying Spatial Audio for Enhanced Conferencing}, series = {Third International Conference on Human and Computer}, booktitle = {Third International Conference on Human and Computer}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, pages = {197 -- 202}, year = {2000}, language = {en} } @inproceedings{BaranowskiUtzigFischeretal.2018, author = {Baranowski, Artur and Utzig, Sebastian and Fischer, Philipp and Gerndt, Andreas and Herder, Jens}, title = {3D spacecraft configuration using immersive AR technology}, series = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15. Workshop der GI-Fachgruppe VR/AR}, booktitle = {Workshop Proceedings / Tagungsband: Virtuelle und Erweiterte Realit{\"a}t - 15. Workshop der GI-Fachgruppe VR/AR}, editor = {Herder, Jens and Geiger, Christian and D{\"o}rner, Ralf and Grimm, Paul}, publisher = {Shaker Verlag}, address = {Herzogenrath}, isbn = {978-3-8440-6215-1}, doi = {10.2370/9783844062151}, pages = {71 -- 82}, year = {2018}, abstract = {In this paper we propose an integrated immersive augmented reality solution for a software tool supporting spacecraft design and verification. The spacecraft design process relies on expertise in many domains, such as thermal and structural engineering. The various subsystems of a spacecraft are highly interdependent and have differing requirements and constraints. In this context, interactive visualizations play an important role in making expert knowledge accessible. Recent immersive display technologies offer new ways of presenting and interacting with computer-generated content. Possibilities and challenges for spacecraft configuration employing these technologies are explored and discussed. A user interface design for an application using the Microsoft HoloLens is proposed. To this end, techniques for selecting a spacecraft component and manipulating its position and orientation in 3D space are developed and evaluated. Thus, advantages and limitations of this approach to spacecraft configuration are revealed and discussed.}, language = {en} }