@inproceedings{RyskeldievCohenHerder2017,
  author = {Ryskeldiev, Bektur and Cohen, Michael and Herder, Jens},
  title = {Applying rotational tracking and photospherical imagery to immersive mobile telepresence and live video streaming groupware},
  series = {Proceedings SA '17: SIGGRAPH Asia 2017 Mobile Graphics \& Interactive Applications, Article No. 5},
  booktitle = {Proceedings SA '17: SIGGRAPH Asia 2017 Mobile Graphics \& Interactive Applications, Article No. 5},
  publisher = {ACM},
  address = {New York},
  isbn = {978-1-4503-5410-3},
  doi = {10.1145/3132787.3132813},
  pages = {2},
  year = {2017},
  abstract = {Mobile live video streaming is becoming an increasingly popular form of interaction in both social media and remote collaboration scenarios. However, in most cases the streamed video does not take the mobile device's spatial data into account (e.g., the viewers do not know the spatial orientation of the streamer), or uses such data only in specific scenarios (e.g., to navigate around a spherical video stream).},
  language = {en}
}

@inproceedings{RyskeldievIgarashiZhangetal.2018,
  author = {Ryskeldiev, Bektur and Igarashi, Toshiharu and Zhang, Junjian and Ochiai, Yoichi and Cohen, Michael and Herder, Jens},
  title = {Spotility: Crowdsourced Telepresence for Social and Collaborative Experiences in Mobile Mixed Reality},
  series = {ACM Conference on Computer Supported Cooperative Work and Social Computing (CSCW '18)},
  booktitle = {ACM Conference on Computer Supported Cooperative Work and Social Computing (CSCW '18)},
  publisher = {ACM},
  address = {New York},
  isbn = {978-1-4503-6018-0},
  doi = {10.1145/3272973.3274100},
  pages = {373 -- 376},
  year = {2018},
  abstract = {Live video streaming is becoming increasingly popular as a form of interaction in social applications. One of its main advantages is the ability to immediately create and connect a community of remote users on the spot. In this paper we discuss how this feature can be used for crowdsourced completion of simple visual search tasks (such as finding specific objects in libraries and stores, or navigating around live events) and for social interactions through mobile mixed reality telepresence interfaces. We present a prototype application that allows users to create a mixed reality space with photospherical imagery as a background and to interact with other connected users through viewpoint, audio, and video sharing, as well as realtime annotations in mixed reality space. Believing in the novelty of our system, we conducted a short series of interviews with industry professionals on its possible applications. We discuss proposed use cases for user evaluation, as well as outline future extensions of our system.},
  language = {en}
}

@inproceedings{RyskeldievOchiaiCohenetal.2018,
  author = {Ryskeldiev, Bektur and Ochiai, Yoichi and Cohen, Michael and Herder, Jens},
  title = {Distributed Metaverse: Creating Decentralized Blockchain-based Model for Peer-to-peer Sharing of Virtual Spaces for Mixed Reality Applications},
  series = {Proceedings of the 9th Augmented Human International Conference},
  booktitle = {Proceedings of the 9th Augmented Human International Conference},
  publisher = {ACM},
  address = {New York},
  isbn = {978-1-4503-5415-8},
  doi = {10.1145/3174910.3174952},
  pages = {7 -- 9},
  year = {2018},
  abstract = {Mixed reality telepresence is becoming an increasingly popular form of interaction in social and collaborative applications. We are interested in how created virtual spaces can be archived, mapped, shared, and reused among different applications.
Therefore, we propose a decentralized blockchain-based peer-to-peer model of distribution, with virtual spaces represented as blocks. We demonstrate the integration of our system in a collaborative mixed reality application and discuss the benefits and limitations of our approach.},
  language = {en}
}

@inproceedings{HerderCohen1996,
  author = {Herder, Jens and Cohen, Michael},
  title = {Design of a Helical Keyboard},
  series = {ICAD'96 -- International Conference on Auditory Display},
  booktitle = {ICAD'96 -- International Conference on Auditory Display},
  address = {Palo Alto},
  year = {1996},
  abstract = {Inspired by the cyclical nature of octaves and the helical structure of a scale (Shepard, '82 and '83), we prepared a model of a piano-style keyboard (prototyped in Mathematica), which was then geometrically warped into a left-handed helical configuration, one octave/revolution, pitch mapped to height. The natural orientation of upper-frequency keys higher on the helix suggests a parsimonious left-handed chirality, so that ascending notes cross in front of a typical listener from left to right. Our model is being imported (via the dxf file format) into (Open Inventor/)VRML, where it can be driven by MIDI events, realtime or sequenced. The event stream is both synthesized (by a Roland Sound Module) and spatialized by a heterogeneous spatial sound backend (including the Crystal River Engineering Acoustetron II and the Pioneer Sound Field Control speaker-array system), so that the sound of the respective notes is directionalized with respect to sinks, avatars of the human user, located by default in the tube of the helix. This is a work in progress, which we hope to have fully functional within the next few months.},
  language = {en}
}

@inproceedings{CohenHerderMartens2001,
  author = {Cohen, Michael and Herder, Jens and Martens, William},
  title = {Panel: Eartop computing and cyberspatial audio technology},
  series = {IEEE-VR2001: IEEE Virtual Reality},
  booktitle = {IEEE-VR2001: IEEE Virtual Reality},
  publisher = {IEEE},
  address = {Yokohama},
  isbn = {0-7695-0948-7},
  pages = {322 -- 323},
  year = {2001},
  language = {en}
}

@inproceedings{HerderCohen1997,
  author = {Herder, Jens and Cohen, Michael},
  title = {Enhancing Perspicuity of Objects in Virtual Reality Environments},
  series = {Proceedings, Second International Conference on Cognitive Technology},
  booktitle = {Proceedings, Second International Conference on Cognitive Technology},
  editor = {Gorayska, Barbara and Nehaniv, Chrystopher L. and Marsh, Jonathon P.},
  publisher = {IEEE},
  address = {Los Alamitos},
  isbn = {0-8186-8084-9},
  pages = {228 -- 237},
  year = {1997},
  abstract = {In an information-rich Virtual Reality (VR) environment, the user is immersed in a world containing many objects providing that information. Given the finite computational resources of any computer system, optimization is required to ensure that the most important information is presented to the user as clearly as possible and in a timely fashion. In particular, what is desired are means whereby the perspicuity of an object may be enhanced when appropriate. An object becomes more perspicuous when the information it provides to the user becomes more readily apparent. Additionally, if a particular object provides high-priority information, it would be advantageous to make that object obtrusive as well as highly perspicuous. An object becomes more obtrusive if it draws attention to itself (or equivalently, if it is hard to ignore).
This paper describes a technique whereby objects may dynamically adapt their representation in a user's environment according to a dynamic priority evaluation of the information each object provides. The three components of our approach are: (1) an information manager that evaluates object information priority; (2) an enhancement manager that tabulates the rendering features associated with increasing object perspicuity and obtrusion as a function of priority; and (3) a resource manager that assigns the available object rendering resources according to the features indicated by the enhancement manager for the priority set for each object by the information manager. We consider resources like visual space (pixels), sound spatialization channels (mixels), MIDI/audio channels, and processing power, and discuss our approach applied to different applications. Assigned object rendering features are implemented either locally at the object level (e.g., an object facing the user via the billboard node in VRML 2.0) or globally, using helper applications (e.g., active spotlights, semi-automatic cameras).},
  language = {en}
}

@inproceedings{HerderCohen1997a,
  author = {Herder, Jens and Cohen, Michael},
  title = {Sound Spatialization Resource Management in Virtual Reality Environments},
  series = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education},
  booktitle = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education},
  address = {Tokyo},
  pages = {407 -- 414},
  year = {1997},
  abstract = {In a virtual reality environment, users are immersed in a scene with objects which might produce sound. The responsibility of a VR environment is to present these objects, but a system has only limited resources, including spatialization channels (mixels), MIDI/audio channels, and processing power. The sound spatialization resource manager controls sound resources and optimizes fidelity (presence) under the given conditions. For that, a priority scheme based on human psychophysical hearing is needed. Parameters for spatialization priorities include intensity calculated from volume and distance, orientation in the case of non-uniform radiation patterns, occluding objects, frequency spectrum (low frequencies are harder to localize), expected activity, and others. Objects which are spatially close together (depending on distance and direction) can be mixed. Sources that cannot be spatialized can be treated as a single ambient sound source. Important for resource management is the resource assignment, i.e., minimizing swap operations, which makes it desirable to look ahead and predict upcoming events in a scene. Prediction is achieved by monitoring objects' speed and past evaluation values. Fidelity is contrasted for different kinds of resource restrictions and optimal resource assignment based upon unlimited dynamic scene look-ahead. To give standard and comparable results, the VRML 2.0 specification is used as an application programming interface. Applicability is demonstrated with a helical keyboard, a polyphonic MIDI-stream-driven animation including user interaction (the user moves around, playing together with programmed notes). The developed sound spatialization resource manager gives improved spatialization fidelity under runtime constraints.
Application programmers and virtual reality scene designers are freed from the burden of assigning and predicting sound sources.},
  language = {en}
}

@inproceedings{AmanoMatsushitaYanagawaetal.1996,
  author = {Amano, Katsumi and Matsushita, Fumio and Yanagawa, Hirofumi and Cohen, Michael and Herder, Jens and Koba, Yoshiharu and Tohyama, Mikio},
  title = {The Pioneer sound field control system at the University of Aizu Multimedia Center},
  series = {RO-MAN '96 Tsukuba},
  booktitle = {RO-MAN '96 Tsukuba},
  publisher = {IEEE},
  address = {Piscataway},
  isbn = {0-7803-3253-9},
  doi = {10.1109/ROMAN.1996.568887},
  pages = {495 -- 499},
  year = {1996},
  abstract = {The PSFC, or Pioneer sound field control system, is a DSP-driven hemispherical 14-loudspeaker array installed at the University of Aizu Multimedia Center. Collocated with a large-screen rear-projection stereographic display, the PSFC features realtime control of virtual room characteristics and of the direction of two separate sound channels, smoothly steering them around a configurable soundscape. The PSFC controls an entire sound field, including sound direction, virtual distance, and simulated environment (reverb level, room size, and liveness) for each source. It can also configure a dry (DSP-less) switching matrix for direct directionalization. The PSFC speaker dome is about 14 m in diameter, allowing about twenty users at once to comfortably stand or sit near its sweet spot.},
  language = {en}
}