@inproceedings{YamazakiHerder2000, author = {Yamazaki, Yasuhiro and Herder, Jens}, title = {Exploring Spatial Audio Conferencing Functionality in Multiuser Virtual Environments}, series = {The Third International Conference on Collaborative Virtual Environments}, booktitle = {The Third International Conference on Collaborative Virtual Environments}, publisher = {ACM}, address = {San Francisco}, pages = {207 -- 208}, year = {2000}, abstract = {A chatspace was developed that allows conversation with 3D sound using networked streaming in a shared virtual environment. The system provides an interface to advanced audio features, such as a "whisper function" for conveying a confided audio stream. This study explores the use of spatial audio to enhance a user's experience in multiuser virtual environments.}, language = {en} } @misc{JaroschHerderLangmann2022, author = {Jarosch, Monika and Herder, Jens and Langmann, Mathias}, title = {Entwicklung einer AR-Applikation zur kosteneffektiven volumetrischen Erfassung von Baugruben}, series = {gis.Science}, volume = {2022}, journal = {gis.Science}, number = {2}, issn = {2698-4571}, pages = {75 -- 83}, year = {2022}, abstract = {Die volumetrische Erfassung von Aush{\"u}ben auf Baustellen ist ein kostenrelevanter Faktor und wird auch heute im t{\"a}glichen Baustellenbetrieb oft noch in manueller Detailarbeit durchgef{\"u}hrt. Kosteng{\"u}nstige Sensoren zur Tiefenerfassung erm{\"o}glichen die halbautomatische Erfassung von Baugruben. Augmented Reality (AR) kann f{\"u}r diesen Prozess das n{\"o}tige Feedback liefern. Vorgestellt wird ein Prototyp, bestehend aus einem Tablet mit integrierter Kamera und einem Lidar-Scanner. Es wird die Erfassung des Volumens bez{\"u}glich Nutzbarkeit und Genauigkeit mit Einsatz von AR getestet und evaluiert. Zur Bestimmung des Volumens wird unter Verwendung von Strahlen mit Unterst{\"u}tzung einer Grafik-Engine ein Algorithmus entwickelt. Der Algorithmus ist robust gegen nicht vollst{\"a}ndig geschlossene Volumen. Die Bedienung, {\"U}berpr{\"u}fung und Visualisierung findet durch praktischen Einsatz von AR statt.}, language = {de} } @inproceedings{HerderCohen1997, author = {Herder, Jens and Cohen, Michael}, title = {Enhancing Perspicuity of Objects in Virtual Reality Environments}, series = {Proceedings, Second International Conference on Cognitive Technology}, booktitle = {Proceedings, Second International Conference on Cognitive Technology}, editor = {Gorayska, Barbara and Nehaniv, Chrystopher L. and Marsh, Jonathon P.}, publisher = {IEEE}, address = {Los Alamitos}, isbn = {0-8186-8084-9}, pages = {228 -- 237}, year = {1997}, abstract = {In an information-rich Virtual Reality (VR) environment, the user is immersed in a world containing many objects providing that information. Given the finite computational resources of any computer system, optimization is required to ensure that the most important information is presented to the user as clearly as possible and in a timely fashion. In particular, what is desired are means whereby the perspicuity of an object may be enhanced when appropriate. An object becomes more perspicuous when the information it provides to the user becomes more readily apparent. Additionally, if a particular object provides high-priority information, it would be advantageous to make that object obtrusive as well as highly perspicuous. An object becomes more obtrusive if it draws attention to itself (or equivalently, if it is hard to ignore). 
This paper describes a technique whereby objects may dynamically adapt their representation in a user's environment according to a dynamic priority evaluation of the information each object provides. The three components of our approach are: (1) an information manager that evaluates object information priority, (2) an enhancement manager that tabulates rendering features associated with increasing object perspicuity and obtrusion as a function of priority, and (3) a resource manager that assigns available object rendering resources according to the features indicated by the enhancement manager for the priority set for each object by the information manager. We consider resources like visual space (pixels), sound spatialization channels (mixels), MIDI/audio channels, and processing power, and discuss our approach applied to different applications. Assigned object rendering features are implemented locally at the object level (e.g., object facing the user using the billboard node in VRML 2.0) or globally, using helper applications (e.g., active spotlights, semi-automatic cameras).}, language = {en} } @inproceedings{RyskeldievOchiaiCohenetal.2018, author = {Ryskeldiev, Bektur and Ochiai, Yoichi and Cohen, Michael and Herder, Jens}, title = {Distributed Metaverse: Creating Decentralized Blockchain-based Model for Peer-to-peer Sharing of Virtual Spaces for Mixed Reality Applications}, series = {Proceedings of the 9th Augmented Human International Conference}, booktitle = {Proceedings of the 9th Augmented Human International Conference}, publisher = {ACM}, isbn = {978-1-4503-5415-8}, doi = {10.1145/3174910.3174952}, pages = {7 -- 9}, year = {2018}, abstract = {Mixed reality telepresence is becoming an increasingly popular form of interaction in social and collaborative applications. We are interested in how created virtual spaces can be archived, mapped, shared, and reused among different applications. Therefore, we propose a decentralized blockchain-based peer-to-peer model of distribution, with virtual spaces represented as blocks. We demonstrate the integration of our system in a collaborative mixed reality application and discuss the benefits and limitations of our approach.}, language = {en} } @article{HonnoSuzukiHerder2000, author = {Honno, Kuniaki and Suzuki, Kenji and Herder, Jens}, title = {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array}, series = {Journal of the 3D-Forum Society}, volume = {14}, journal = {Journal of the 3D-Forum Society}, number = {4}, pages = {146 -- 151}, year = {2000}, abstract = {The Pioneer Sound Field Controller (PSFC), a loudspeaker array system, features realtime configuration of an entire sound field, including sound source direction, virtual distance, and context of the simulated environment (room characteristics: room size and liveness) for each of two sound sources. In the PSFC system, there is no native parameter to specify the distance between the sound source and the sound sink (listener) and also no function to control it directly. This paper suggests a method to control virtual distance using basic parameters: volume, room size, and liveness. The implementation of distance cues is an important aspect of 3D sound. Virtual environments supporting room effects like reverberation not only gain realism but also provide additional information to users about the surrounding space. The context switch of different aural attributes is done by using an API of the Sound Spatialization Framework.
Therefore, when the sound sink moves through two rooms, like a small bathroom and a large living room, the context of the sink switches and a different sound is obtained.}, language = {en} } @inproceedings{HonnoSuzukiHerder2000, author = {Honno, Kuniaki and Suzuki, Kenji and Herder, Jens}, title = {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array}, series = {Third International Conference on Human and Computer}, booktitle = {Third International Conference on Human and Computer}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, pages = {71 -- 76}, year = {2000}, abstract = {The Pioneer Sound Field Controller (PSFC), a loudspeaker array system, features realtime configuration of an entire sound field, including sound source direction, virtual distance, and context of the simulated environment (room characteristics: room size and liveness) for each of two sound sources. In the PSFC system, there is no native parameter to specify the distance between the sound source and the sound sink (listener) and also no function to control it directly. This paper suggests a method to control virtual distance using basic parameters: volume, room size, and liveness. The implementation of distance cues is an important aspect of 3D sound. Virtual environments supporting room effects like reverberation not only gain realism but also provide additional information to users about the surrounding space. The context switch of different aural attributes is done by using an API of the Sound Spatialization Framework. Therefore, when the sound sink moves through two rooms, like a small bathroom and a large living room, the context of the sink switches and a different sound is obtained.}, language = {en} } @incollection{HerderGeigerLehmannetal.2009, author = {Herder, Jens and Geiger, Christian and Lehmann, Anke and Vierjahn, Tom and W{\"o}ldecke, Bj{\"o}rn}, title = {Designstrategien f{\"u}r den Einsatz von vibrotaktilem Feedback in Mixed Reality Anwendungen}, series = {Augmented \& Virtual Reality in der Produktentstehung}, volume = {232}, booktitle = {Augmented \& Virtual Reality in der Produktentstehung}, editor = {Gausemeier, J{\"u}rgen and Grafe, Michael}, publisher = {Heinz Nixdorf Institut, Universit{\"a}t Paderborn}, address = {Paderborn}, isbn = {978-3-939350-71-2}, pages = {225 -- 240}, year = {2009}, language = {de} } @inproceedings{GeigerHerderGoebeletal.2010, author = {Geiger, Christian and Herder, Jens and G{\"o}bel, Sebastian and Heinze, Christin and Marinos, Dionysios}, title = {Design and Virtual Studio Presentation of a Traditional Archery Simulator}, series = {Proceedings of the Entertainment Interfaces Track 2010 at Interaktive Kulturen, Duisburg, Germany, September 12-15, 2010}, booktitle = {Proceedings of the Entertainment Interfaces Track 2010 at Interaktive Kulturen, Duisburg, Germany, September 12-15, 2010}, address = {Duisburg}, pages = {37 -- 44}, year = {2010}, abstract = {In this paper we describe the design of a virtual reality simulator for traditional intuitive archery. Traditional archers aim without a target figure. Good shooting results require excellent body-eye coordination that allows the user to perform identical movements when drawing the bow. Our simulator provides a virtual archery experience and helps the user learn and practice the motion sequence of traditional archery in a virtual environment. We use an infrared tracking system to capture the user's movements in order to correct them.
To provide realistic haptic feedback, a real bow is used as the interaction device. Our system provides a believable user experience and helps the user learn how to shoot in the traditional way. Following a user-centered iterative design approach, we developed a number of prototypes and evaluated them for refinement in subsequent iteration cycles. For illustration purposes we created a short video clip in our virtual studio about this project that presents the main ideas in an informative yet entertaining way.}, language = {en} } @article{CohenHerderLMartens1999, author = {Cohen, Michael and Herder, Jens and Martens, William L.}, title = {Cyberspatial Audio Technology}, series = {The Journal of the Acoustical Society of Japan (E)}, volume = {20}, journal = {The Journal of the Acoustical Society of Japan (E)}, number = {6}, doi = {10.1250/ast.20.389}, pages = {389 -- 395}, year = {1999}, abstract = {Cyberspatial audio applications are distinguished from the broad range of spatial audio applications in a number of important ways that help to focus this review. Most significant is that cyberspatial audio is most often designed to be responsive to user inputs. In contrast to non-interactive auditory displays, cyberspatial auditory displays typically allow active exploration of the virtual environment in which users find themselves. Thus, at least some portion of the audio presented in a cyberspatial environment must be selected, processed, or otherwise rendered with minimum delay relative to user input. Besides the technological demands associated with realtime delivery of spatialized sound, the type and quality of auditory experiences supported are also very different from those associated with displays that support stationary sound localization.}, language = {en} } @inproceedings{BeckerHerder2012, author = {Becker, Thomas and Herder, Jens}, title = {Cost effective tangibles using fiducials for infrared multi-touch frames}, series = {15th International Conference on Human and Computer}, booktitle = {15th International Conference on Human and Computer}, address = {Hamamatsu/Aizu-Wakamatsu/Duesseldorf}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-16011}, pages = {7}, year = {2012}, abstract = {The recent emergence of multi-touch sensitive displays enables the use of tangibles on multi-touch screens. There are several widespread and/or sophisticated solutions to fulfill this need, but they seem to have some flaws. One popular system at the time of writing is an overlay frame that can be placed on a normal display of the corresponding size. The frame creates a grid with infrared light-emitting diodes. The disruption of this grid can be detected, and messages with the positions are sent via USB to a connected computer. This system is quite robust against ambient light and also fast to calibrate. Unfortunately, it was not created with the recognition of tangibles in mind, and printed patterns cannot be resolved. This article summarizes an attempt to create fiducials that are recognized by an infrared multi-touch frame as fingers. These false fingers are checked by software for known patterns. Once a known pattern (= fiducial) has been recognized, its position and orientation are sent along with the finger positions to the interactive software. The usability is tested with an example application where tangibles and finger touches are used in combination.}, language = {en} }