@incollection{DavinHerder2021, author = {Davin, Till and Herder, Jens}, title = {Real-Time Relighting of Video Streams for Augmented Virtuality Scenes}, series = {GI VR / AR Workshop. Gesellschaft f{\"u}r Informatik e.V.}, booktitle = {GI VR / AR Workshop. Gesellschaft f{\"u}r Informatik e.V.}, editor = {Weier, Martin and Bues, Matthias and Wechner, Reto}, publisher = {Gesellschaft f{\"u}r Informatik e.V. (GI)}, address = {Bonn}, doi = {10.18420/vrar2021_6}, pages = {16}, year = {2021}, language = {en} } @misc{JaroschHerderLangmann2022, author = {Jarosch, Monika and Herder, Jens and Langmann, Mathias}, title = {Entwicklung einer AR-Applikation zur kosteneffektiven volumetrischen Erfassung von Baugruben}, series = {gis.Science}, volume = {2022}, journal = {gis.Science}, number = {2}, issn = {2698-4571}, pages = {75 -- 83}, year = {2022}, abstract = {The volumetric measurement of excavations on construction sites is a cost-relevant factor and is still often carried out through detailed manual work in daily site operations. Low-cost depth sensors enable the semi-automatic capture of excavation pits, and Augmented Reality (AR) can provide the feedback this process requires. We present a prototype consisting of a tablet with an integrated camera and a lidar scanner. The volume measurement is tested and evaluated with respect to usability and accuracy using AR. An algorithm for determining the volume is developed using rays with the support of a graphics engine. The algorithm is robust against volumes that are not completely closed. Operation, verification, and visualisation take place through the practical use of AR.}, language = {de} } @article{RyskeldievCohenHerder2018, author = {Ryskeldiev, Bektur and Cohen, Michael and Herder, Jens}, title = {StreamSpace: Pervasive Mixed Reality Telepresence for Remote Collaboration on Mobile Devices}, series = {Journal of Information Processing}, volume = {26}, journal = {Journal of Information Processing}, publisher = {J-STAGE}, doi = {10.2197/ipsjjip.26.177}, pages = {177 -- 185}, year = {2018}, abstract = {We present a system that exploits mobile rotational tracking and photospherical imagery to allow users to share their environment with remotely connected peers "on the go." We surveyed related interfaces and developed a unique groupware application that shares a mixed reality space with spatially-oriented live video feeds. Users can collaborate through realtime audio, video, and drawings in a virtual space. The developed system was tested in a preliminary user study, which confirmed an increase in spatial and situational awareness among viewers as well as a reduction in cognitive workload. 
Believing that our system provides a novel style of collaboration in mixed reality environments, we discuss future applications and extensions of our prototype.}, subject = {Ubiquitous Computing}, language = {en} } @inproceedings{HerderBrettschneiderdeMooijetal.2019, author = {Herder, Jens and Brettschneider, Nico and de Mooij, Jeroen and Ryskeldiev, Bektur}, title = {Avatars for Co-located Collaborations in HMD-based Virtual Environments}, series = {IEEE VR 2019, 26th IEEE Conference on Virtual Reality and 3D User Interfaces, Osaka, March, 2019}, booktitle = {IEEE VR 2019, 26th IEEE Conference on Virtual Reality and 3D User Interfaces, Osaka, March, 2019}, publisher = {IEEE}, address = {Osaka}, doi = {10.1109/VR.2019.8798132}, pages = {968 -- 969}, year = {2019}, abstract = {Multi-user virtual reality is transforming into a social activity that is no longer used only by remote users but also in large-scale location-based experiences. The usage of realtime-tracked avatars in co-located business-oriented applications with a "guide-user scenario" is examined with respect to the user-related factors of Spatial Presence, Social Presence, User Experience and Task Load. A user study was conducted to compare the two techniques of a realtime-tracked avatar and a non-visualised guide. Results reveal that the avatar-guide enhanced and stimulated communicative processes while facilitating interaction possibilities and creating a higher sense of mental immersion and engagement for users.}, language = {en} } @inproceedings{HerderTakedaVermeegenetal.2019, author = {Herder, Jens and Takeda, Shinpei and Vermeegen, Kai and Davin, Till and Berners, Dominique and Ryskeldiev, Bektur and Zimmer, Christian and Druzetic, Ivana and Geiger, Christian}, title = {Mixed Reality Art Experiments - Immersive Access to Collective Memories}, series = {ISEA2019, Proceedings, 25th International Symposium on Electronic Art, Gwangju, South Korea, June 22-28, 2019}, booktitle = {ISEA2019, Proceedings, 25th International Symposium on Electronic Art, Gwangju, South Korea, June 22-28, 2019}, publisher = {ISEA}, address = {Gwangju}, pages = {334 -- 341}, year = {2019}, abstract = {We report on several experiments on applying mixed reality technology in the context of accessing collective memories of the atomic bombs, the Holocaust, and the Second World War. We discuss the impact of Virtual Reality, Augmented Virtuality and Augmented Reality for specific memorial locations. We show how to use a virtual studio for demonstrating an augmented reality application for a specific location in a remote session within a video conference. Augmented Virtuality is used to recreate the local environment, thus providing a context and helping the participants recollect emotions related to a certain place. 
This technique demonstrates the advantages of using virtual (VR) and augmented (AR) reality environments for rapid prototyping and pitching project ideas in a live remote setting.}, language = {en} } @inproceedings{HerderCohen1996, author = {Herder, Jens and Cohen, Michael}, title = {Design of a Helical Keyboard}, series = {ICAD '96 - International Conference on Auditory Display, Palo Alto}, booktitle = {ICAD '96 - International Conference on Auditory Display, Palo Alto}, address = {Palo Alto}, year = {1996}, abstract = {Inspired by the cyclical nature of octaves and the helical structure of a scale (Shepard, '82 and '83), we prepared a model of a piano-style keyboard (prototyped in Mathematica), which was then geometrically warped into a left-handed helical configuration, one octave/revolution, pitch mapped to height. The natural orientation of upper-frequency keys higher on the helix suggests a parsimonious left-handed chirality, so that ascending notes cross in front of a typical listener from left to right. Our model is being imported (via the dxf file format) into (Open Inventor/)VRML, where it can be driven by MIDI events, realtime or sequenced; the event stream is both synthesized (by a Roland Sound Module) and spatialized by a heterogeneous spatial sound backend (including the Crystal River Engineering Acoustetron II and the Pioneer Sound Field Control speaker-array System), so that the sound of the respective notes is directionalized with respect to sinks, avatars of the human user, by default inside the tube of the helix. This is a work in progress, which we hope to have fully functional within the next few months.}, language = {en} } @article{MartensHerder1999, author = {Martens, William L. and Herder, Jens}, title = {Perceptual criteria for eliminating reflectors and occluders for efficient rendering of environmental sound}, series = {The Journal of the Acoustical Society of America}, volume = {105}, journal = {The Journal of the Acoustical Society of America}, number = {2}, doi = {10.1121/1.425349}, pages = {979}, year = {1999}, language = {en} } @article{MartensHerderShiba1999, author = {Martens, William L. and Herder, Jens and Shiba, Yoshiki}, title = {A filtering model for efficient rendering of the spatial image of an occluded virtual sound source}, series = {The Journal of the Acoustical Society of America}, volume = {105}, journal = {The Journal of the Acoustical Society of America}, number = {2}, doi = {10.1121/1.425354}, pages = {980}, year = {1999}, abstract = {Rendering realistic spatial sound imagery for complex virtual environments must take into account the effects of obstructions such as reflectors and occluders. It is relatively well understood how to calculate the acoustical consequence that would be observed at a given observation point when an acoustically opaque object occludes a sound source. But the interference patterns generated by occluders of various geometries and orientations relative to the virtual source and receiver are computationally intensive to reproduce if accurate results are required. In many applications, however, it is sufficient to create a spatial image that is recognizable by the human listener as the sound of an occluded source. In the interest of improving audio rendering efficiency, a simplified filtering model was developed and its audio output submitted to psychophysical evaluation. Two perceptually salient components of occluder acoustics were identified that could be directly related to the geometry and orientation of a simple occluder. 
Actual occluder impulse responses measured in an anechoic chamber resembled the responses of a model incorporating only a variable-duration delay line and a low-pass filter with variable cutoff frequency.}, language = {en} } @inproceedings{Herder2001, author = {Herder, Jens}, title = {Interactive Content Creation with Virtual Set Environments}, series = {Fourth International Conference on Human and Computer}, booktitle = {Fourth International Conference on Human and Computer}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, year = {2001}, abstract = {Digital broadcasting enables interactive TV, which presents new challenges for interactive content creation. Besides the technology for streaming and viewing, tools and systems are under development that extend traditional TV studios with virtual set environments. This presentation reviews current technology and describes the requirements for such systems. Interoperability across the production, streaming, and viewer levels requires open interfaces. As the technology allows more interaction, it becomes inherently difficult to control the quality of the viewer's experience.}, language = {en} } @incollection{Herder2006, author = {Herder, Jens}, title = {Matching Light for Virtual Studio TV Productions}, series = {9th International Conference on Human and Computer}, booktitle = {9th International Conference on Human and Computer}, address = {Aizu-Wakamatsu}, pages = {158 -- 162}, year = {2006}, abstract = {High dynamic range environment maps based on still images or video streams are used for computer animation or interactive systems. The task of realistically setting up the lighting of scenes might be eased, and the visual quality improved, by using captured environment maps. In this article, we discuss the light setting problem for virtual (TV) studios, whose layout and systems become more complex in order to handle this new feature of studio light capturing. The analysis of system requirements identifies the technical challenges.}, language = {en} }