@inproceedings{AmanoMatsushitaYanagawaetal.1996,
  author = {Amano, Katsumi and Matsushita, Fumio and Yanagawa, Hirofumi and Cohen, Michael and Herder, Jens and Koba, Yoshiharu and Tohyama, Mikio},
  title = {The Pioneer sound field control system at the University of Aizu Multimedia Center},
  series = {RO-MAN '96 Tsukuba},
  booktitle = {RO-MAN '96 Tsukuba},
  publisher = {IEEE},
  address = {Piscataway},
  isbn = {0-7803-3253-9},
  doi = {10.1109/ROMAN.1996.568887},
  pages = {495 -- 499},
  year = {1996},
  abstract = {The PSFC, or Pioneer sound field control system, is a DSP-driven hemispherical 14-loudspeaker array, installed at the University of Aizu Multimedia Center. Collocated with a large screen rear-projection stereographic display, the PSFC features realtime control of virtual room characteristics and direction of two separate sound channels, smoothly steering them around a configurable soundscape. The PSFC controls an entire sound field, including sound direction, virtual distance, and simulated environment (reverb level, room size, and liveness) for each source. It can also configure a dry (DSP-less) switching matrix for direct directionalization. The PSFC speaker dome is about 14 m in diameter, allowing about twenty users at once to comfortably stand or sit near its sweet spot.},
  language = {en}
}

@article{HerderCohen2002,
  author = {Herder, Jens and Cohen, Michael},
  title = {The Helical Keyboard: Perspectives for Spatial Auditory Displays and Visual Music},
  series = {Journal of New Music Research},
  volume = {31},
  journal = {Journal of New Music Research},
  number = {3},
  pages = {269 -- 281},
  year = {2002},
  abstract = {Auditory displays with the ability to dynamically spatialize virtual sound sources under real-time conditions enable advanced applications for art and music. A listener can be deeply immersed while interacting and participating in the experience. We review some of those applications while focusing on the Helical Keyboard project and discussing the required technology. Inspired by the cyclical nature of octaves and helical structure of a scale, a model of a piano-style keyboard was prepared, which was then geometrically warped into a helicoidal configuration, one octave/revolution, pitch mapped to height and chroma. It can be driven by MIDI events, real-time or sequenced, which stream is both synthesized and spatialized by a spatial sound display. The sound of the respective notes is spatialized with respect to sinks, avatars of the human user, by default in the tube of the helix. Alternative coloring schemes can be applied, including a color map compatible with chromastereoptic eyewear. The graphical display animates polygons, interpolating between the notes of a chord across the tube of the helix. Recognition of simple chords allows directionalization of all the notes of a major triad from the position of its musical root. The system is designed to allow, for instance, separate audition of harmony and melody, commonly played by the left and right hands, respectively, on a normal keyboard. Perhaps the most exotic feature of the interface is the ability to fork one's presence, replicating subject instead of object by installing multiple sinks at arbitrary places around a virtual scene so that, for example, harmony and melody can be separately spatialized, using two heads to normalize the octave; such a technique effectively doubles the helix from the perspective of a single listener. Rather than a symmetric arrangement of the individual helices, they are perceptually superimposed in-phase, co-extensively, so that corresponding notes in different registers are at the same azimuth.},
  language = {en}
}
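
The helix geometry described in the Helical Keyboard abstract above (one octave per revolution, pitch mapped to height, chroma to azimuth) reduces to a few lines of trigonometry. The following Python sketch is illustrative only; helix_position, the radius, and the rise-per-octave constant are assumptions for exposition, not values or code from the paper.

    import math

    def helix_position(midi_note, radius=1.0, rise_per_octave=0.5):
        """Map a MIDI note number to a point on a pitch helix."""
        chroma = midi_note % 12                        # pitch class sets the azimuth
        theta = 2.0 * math.pi * chroma / 12.0          # one full revolution per octave
        height = (midi_note / 12.0) * rise_per_octave  # absolute pitch sets the height
        return (radius * math.cos(theta), radius * math.sin(theta), height)

Octave-related notes (e.g., MIDI 60 and 72) share an azimuth but differ in height, matching the superimposed, in-phase registers the abstract describes.
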
@incollection{CohenHerder1998,
  author = {Cohen, Michael and Herder, Jens},
  title = {Symbolic representations of exclude and include for audio sources and sinks: Figurative suggestions of mute/solo \& cue and deafen/confide \& harken},
  series = {Virtual Environments '98, Proceedings of the Eurographics Workshop},
  booktitle = {Virtual Environments '98, Proceedings of the Eurographics Workshop},
  editor = {G{\"o}bel, Martin and Landauer, J{\"u}rgen and Lang, Ulrich and Wapler, Matthias},
  publisher = {Springer-Verlag},
  address = {Stuttgart},
  isbn = {3-211-83233-5},
  doi = {10.1007/978-3-7091-7519-4_23},
  pages = {235 -- 242},
  year = {1998},
  language = {en}
}

@article{RyskeldievCohenHerder2018,
  author = {Ryskeldiev, Bektur and Cohen, Michael and Herder, Jens},
  title = {StreamSpace: Pervasive Mixed Reality Telepresence for Remote Collaboration on Mobile Devices},
  series = {Journal of Information Processing},
  volume = {26},
  journal = {Journal of Information Processing},
  publisher = {J-STAGE},
  doi = {10.2197/ipsjjip.26.177},
  pages = {177 -- 185},
  year = {2018},
  abstract = {We present a system that exploits mobile rotational tracking and photospherical imagery to allow users to share their environment with remotely connected peers ``on the go.'' We surveyed related interfaces and developed a unique groupware application that shares a mixed reality space with spatially-oriented live video feeds. Users can collaborate through realtime audio, video, and drawings in a virtual space. The developed system was tested in a preliminary user study, which confirmed an increase in spatial and situational awareness among viewers as well as a reduction in cognitive workload. Believing that our system provides a novel style of collaboration in mixed reality environments, we discuss future applications and extensions of our prototype.},
  subject = {Ubiquitous Computing},
  language = {en}
}

@inproceedings{RyskeldievIgarashiZhangetal.2018,
  author = {Ryskeldiev, Bektur and Igarashi, Toshiharu and Zhang, Junjian and Ochiai, Yoichi and Cohen, Michael and Herder, Jens},
  title = {Spotility: Crowdsourced Telepresence for Social and Collaborative Experiences in Mobile Mixed Reality},
  series = {ACM Conference on Computer Supported Cooperative Work and Social Computing (CSCW '18)},
  booktitle = {ACM Conference on Computer Supported Cooperative Work and Social Computing (CSCW '18)},
  publisher = {ACM},
  address = {New York},
  isbn = {978-1-4503-6018-0},
  doi = {10.1145/3272973.3274100},
  pages = {373 -- 376},
  year = {2018},
  abstract = {Live video streaming is becoming increasingly popular as a form of interaction in social applications. One of its main advantages is the ability to immediately create and connect a community of remote users on the spot. In this paper we discuss how this feature can be used for crowdsourced completion of simple visual search tasks (such as finding specific objects in libraries and stores, or navigating around live events) and social interactions through mobile mixed reality telepresence interfaces. We present a prototype application that allows users to create a mixed reality space with photospherical imagery as a background and interact with other connected users through viewpoint, audio, and video sharing, as well as realtime annotations in mixed reality space. Believing in the novelty of our system, we conducted a short series of interviews with industry professionals on the possible applications of our system. We discuss proposed use-cases for user evaluation, as well as outline future extensions of our system.},
  language = {en}
}
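
StreamSpace and Spotility (the two entries above) both orient live views inside a shared photospherical background using the mobile device's rotational tracking. The Python sketch below shows the minimal lookup underlying such an interface, mapping a viewer's yaw and pitch to pixel coordinates in an equirectangular panorama; the function name and conventions (yaw measured from the image's left edge, pitch positive upward) are assumptions, not the papers' implementation.

    def equirect_lookup(yaw_deg, pitch_deg, width, height):
        """Project a view direction onto an equirectangular photosphere."""
        u = ((yaw_deg % 360.0) / 360.0) * width    # longitude -> horizontal pixel
        v = ((90.0 - pitch_deg) / 180.0) * height  # latitude -> vertical pixel (top = +90)
        return int(u) % width, max(0, min(height - 1, int(v)))
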
@inproceedings{HerderCohen1997,
  author = {Herder, Jens and Cohen, Michael},
  title = {Sound Spatialization Resource Management in Virtual Reality Environments},
  series = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education},
  booktitle = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education},
  address = {Tokyo},
  pages = {407 -- 414},
  year = {1997},
  abstract = {In a virtual reality environment users are immersed in a scene with objects which might produce sound. The responsibility of a VR environment is to present these objects, but a system has only limited resources, including spatialization channels (mixels), MIDI/audio channels, and processing power. The sound spatialization resource manager controls sound resources and optimizes fidelity (presence) under given conditions. For that, a priority scheme based on human psychophysical hearing is needed. Parameters for spatialization priorities include intensity calculated from volume and distance, orientation in the case of non-uniform radiation patterns, occluding objects, frequency spectrum (low frequencies are harder to localize), expected activity, and others. Objects which are spatially close together (depending on distance and direction) can be mixed. Sources that cannot be spatialized can be treated as a single ambient sound source. Important for resource management is the resource assignment, i.e., minimizing swap operations, which makes it desirable to look ahead and predict upcoming events in a scene. Prediction is achieved by monitoring objects' speed and past evaluation values. Fidelity is contrasted for different kinds of resource restrictions and optimal resource assignment based upon unlimited dynamic scene look-ahead. To give standard and comparable results, the VRML 2.0 specification is used as an application programmer interface. Applicability is demonstrated with a helical keyboard, a polyphonic MIDI-stream-driven animation including user interaction (user moves around, playing together with programmed notes). The developed sound spatialization resource manager gives improved spatialization fidelity under runtime constraints. Application programmers and virtual reality scene designers are freed from the burden of assigning and predicting the sound sources.},
  language = {en}
}

@inproceedings{CohenHerderMartens2001,
  author = {Cohen, Michael and Herder, Jens and Martens, William},
  title = {Panel: Eartop computing and cyberspatial audio technology},
  series = {IEEE-VR2001: IEEE Virtual Reality},
  booktitle = {IEEE-VR2001: IEEE Virtual Reality},
  publisher = {IEEE},
  address = {Yokohama},
  isbn = {0-7695-0948-7},
  pages = {322 -- 323},
  year = {2001},
  language = {en}
}

@inproceedings{HerderCohen1997a,
  author = {Herder, Jens and Cohen, Michael},
  title = {Enhancing Perspicuity of Objects in Virtual Reality Environments},
  series = {Proceedings, Second International Conference on Cognitive Technology},
  booktitle = {Proceedings, Second International Conference on Cognitive Technology},
  editor = {Gorayska, Barbara and Nehaniv, Chrystopher L. and Marsh, Jonathon P.},
  publisher = {IEEE},
  address = {Los Alamitos},
  isbn = {0-8186-8084-9},
  pages = {228 -- 237},
  year = {1997},
  abstract = {In an information-rich Virtual Reality (VR) environment, the user is immersed in a world containing many objects providing that information. Given the finite computational resources of any computer system, optimization is required to ensure that the most important information is presented to the user as clearly as possible and in a timely fashion. In particular, what is desired are means whereby the perspicuity of an object may be enhanced when appropriate. An object becomes more perspicuous when the information it provides to the user becomes more readily apparent. Additionally, if a particular object provides high-priority information, it would be advantageous to make that object obtrusive as well as highly perspicuous. An object becomes more obtrusive if it draws attention to itself (or equivalently, if it is hard to ignore). This paper describes a technique whereby objects may dynamically adapt their representation in a user's environment according to a dynamic priority evaluation of the information each object provides. The three components of our approach are: (1) an information manager that evaluates object information priority; (2) an enhancement manager that tabulates rendering features associated with increasing object perspicuity and obtrusion as a function of priority; and (3) a resource manager that assigns available object rendering resources according to features indicated by the enhancement manager for the priority set for each object by the information manager. We consider resources like visual space (pixels), sound spatialization channels (mixels), MIDI/audio channels, and processing power, and discuss our approach applied to different applications. Assigned object rendering features are implemented locally at the object level (e.g., object facing the user using the billboard node in VRML 2.0) or globally, using helper applications (e.g., active spotlights, semi-automatic cameras).},
  language = {en}
}
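
The ASVA'97 and Cognitive Technology entries above share one idea: score each sound source or object by perceptual priority, hand the scarce spatialization channels (mixels) to the top-ranked sources, and fold the remainder into an ambient mix. The Python sketch below is a toy rendering of that scheme; the weighting constants and field names are invented for illustration and do not reproduce the papers' actual evaluation functions.

    def spatialization_priority(volume, distance, low_freq_ratio, occluded):
        """Louder, closer, broadband, unoccluded sources rank higher."""
        intensity = volume / max(distance * distance, 1e-6)  # inverse-square falloff
        occlusion_penalty = 0.5 if occluded else 1.0
        return intensity * occlusion_penalty * (1.0 - 0.5 * low_freq_ratio)

    def assign_mixels(sources, n_mixels):
        """Split sources into spatialized and ambient-mixed groups.

        Each source is a dict with a precomputed "priority" entry."""
        ranked = sorted(sources, key=lambda s: s["priority"], reverse=True)
        return ranked[:n_mixels], ranked[n_mixels:]
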
@inproceedings{RyskeldievOchiaiCohenetal.2018,
  author = {Ryskeldiev, Bektur and Ochiai, Yoichi and Cohen, Michael and Herder, Jens},
  title = {Distributed Metaverse: Creating Decentralized Blockchain-based Model for Peer-to-peer Sharing of Virtual Spaces for Mixed Reality Applications},
  series = {Proceedings of the 9th Augmented Human International Conference},
  booktitle = {Proceedings of the 9th Augmented Human International Conference},
  publisher = {ACM},
  isbn = {978-1-4503-5415-8},
  doi = {10.1145/3174910.3174952},
  pages = {7 -- 9},
  year = {2018},
  abstract = {Mixed reality telepresence is becoming an increasingly popular form of interaction in social and collaborative applications. We are interested in how created virtual spaces can be archived, mapped, shared, and reused among different applications. Therefore, we propose a decentralized blockchain-based peer-to-peer model of distribution, with virtual spaces represented as blocks. We demonstrate the integration of our system in a collaborative mixed reality application and discuss the benefits and limitations of our approach.},
  language = {en}
}
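
The Distributed Metaverse abstract above represents each shared virtual space as a block in a hash-linked chain. A minimal Python sketch of such a block structure follows, with SHA-256 linking and no consensus or peer-to-peer layer; the payload fields are hypothetical examples, not the paper's data model.

    import hashlib, json, time

    def make_block(space_payload, prev_hash):
        """Serialize one virtual space as a block linked to its predecessor."""
        block = {
            "timestamp": time.time(),
            "space": space_payload,   # e.g., photosphere URL plus anchor transforms
            "prev_hash": prev_hash,   # hash of the previous block ties the chain together
        }
        block["hash"] = hashlib.sha256(
            json.dumps(block, sort_keys=True).encode()).hexdigest()
        return block
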
@inproceedings{HerderCohen1996,
  author = {Herder, Jens and Cohen, Michael},
  title = {Design of a Helical Keyboard},
  series = {ICAD '96 -- International Conference on Auditory Display},
  booktitle = {ICAD '96 -- International Conference on Auditory Display},
  address = {Palo Alto},
  year = {1996},
  abstract = {Inspired by the cyclical nature of octaves and helical structure of a scale (Shepard, '82 and '83), we prepared a model of a piano-style keyboard (prototyped in Mathematica), which was then geometrically warped into a left-handed helical configuration, one octave/revolution, pitch mapped to height. The natural orientation of upper frequency keys higher on the helix suggests a parsimonious left-handed chirality, so that ascending notes cross in front of a typical listener left to right. Our model is being imported (via the dxf file format) into (Open Inventor/)VRML, where it can be driven by MIDI events, realtime or sequenced, which stream is both synthesized (by a Roland Sound Module) and spatialized by a heterogeneous spatial sound backend (including the Crystal River Engineering Acoustetron II and the Pioneer Sound Field Control speaker-array System), so that the sound of the respective notes is directionalized with respect to sinks, avatars of the human user, by default in the tube of the helix. This is a work-in-progress which we hope to be fully functional within the next few months.},
  language = {en}
}

@article{CohenHerderLMartens1999,
  author = {Cohen, Michael and Herder, Jens and Martens, William L.},
  title = {Cyberspatial Audio Technology},
  series = {The Journal of the Acoustical Society of Japan (E)},
  volume = {20},
  journal = {The Journal of the Acoustical Society of Japan (E)},
  number = {6},
  doi = {10.1250/ast.20.389},
  pages = {389 -- 395},
  year = {1999},
  abstract = {Cyberspatial audio applications are distinguished from the broad range of spatial audio applications in a number of important ways that help to focus this review. Most significant is that cyberspatial audio is most often designed to be responsive to user inputs. In contrast to non-interactive auditory displays, cyberspatial auditory displays typically allow active exploration of the virtual environment in which users find themselves. Thus, at least some portion of the audio presented in a cyberspatial environment must be selected, processed, or otherwise rendered with minimum delay relative to user input. Besides the technological demands associated with realtime delivery of spatialized sound, the type and quality of auditory experiences supported are also very different from those associated with displays that support stationary sound localization.},
  language = {en}
}

@inproceedings{RyskeldievCohenHerder2017,
  author = {Ryskeldiev, Bektur and Cohen, Michael and Herder, Jens},
  title = {Applying rotational tracking and photospherical imagery to immersive mobile telepresence and live video streaming groupware},
  series = {Proceedings of SA '17: SIGGRAPH Asia 2017 Mobile Graphics \& Interactive Applications},
  booktitle = {Proceedings of SA '17: SIGGRAPH Asia 2017 Mobile Graphics \& Interactive Applications},
  note = {Article No. 5},
  publisher = {ACM},
  address = {New York},
  isbn = {978-1-4503-5410-3},
  doi = {10.1145/3132787.3132813},
  pages = {2},
  year = {2017},
  abstract = {Mobile live video streaming is becoming an increasingly popular form of interaction both in social media and remote collaboration scenarios. However, in most cases the streamed video does not take mobile devices' spatial data into account (e.g., the viewers do not know the spatial orientation of a streamer), or uses such data only in specific scenarios (e.g., to navigate around a spherical video stream).},
  language = {en}
}
@article{AmanoMatsushitaYanagawaetal.1998,
  author = {Amano, Katsumi and Matsushita, Fumio and Yanagawa, Hirofumi and Cohen, Michael and Herder, Jens and Martens, William and Koba, Yoshiharu and Tohyama, Mikio},
  title = {A Virtual Reality Sound System Using Room-Related Transfer Functions Delivered Through a Multispeaker Array: the PSFC at the University of Aizu Multimedia Center},
  series = {TVRSJ},
  volume = {3},
  journal = {TVRSJ},
  number = {1},
  publisher = {J-STAGE},
  doi = {10.18974/tvrsj.3.1_1},
  pages = {1 -- 12},
  year = {1998},
  abstract = {The PSFC, or Pioneer Sound Field Controller, is a DSP-driven hemispherical loudspeaker array, installed at the University of Aizu Multimedia Center. The PSFC features realtime manipulation of the primary components of sound spatialization for each of two audio sources located in a virtual environment, including the content (apparent direction and distance) and context (room characteristics: reverberation level, room size, and liveness). In an alternate mode, it can also direct the destination of the two separate input signals across 14 loudspeakers, manipulating the direction of the virtual sound sources with no control over apparent distance other than that afforded by source loudness (including no simulated environmental reflections or reverberation). The PSFC speaker dome is about 10 m in diameter, accommodating about fifty simultaneous users, including about twenty users comfortably standing or sitting near its ``sweet spot,'' the area in which the illusions of sound spatialization are most vivid. Collocated with a large screen rear-projection stereographic display, the PSFC is intended for advanced multimedia and virtual reality applications.},
  language = {en}
}
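
The PSFC entries describe steering virtual sources across a hemispherical loudspeaker array. As a point of comparison only, the Python sketch below does crude amplitude panning over a ring of speaker azimuths; the real PSFC instead applies DSP room-related transfer functions, and the spread constant here is an invented parameter.

    import math

    def dome_gains(source_az, speaker_azimuths, spread=30.0):
        """Weight each speaker by angular closeness to the virtual source."""
        gains = []
        for az in speaker_azimuths:
            d = abs(source_az - az) % 360.0
            d = min(d, 360.0 - d)                     # shortest angular distance
            gains.append(max(0.0, 1.0 - d / spread))  # linear falloff within the spread
        norm = math.sqrt(sum(g * g for g in gains)) or 1.0  # constant-power normalization
        return [g / norm for g in gains]
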