@misc{Struchholz2005, author = {Struchholz, Holger}, title = {Interaktive Visualisierung und Abh{\"o}rung zur Klangabstrahlung von Musikinstrumenten - Interpolation und Filterung mehrkanaliger Aufnahmen unter Verwendung der geometrischen Relationen}, address = {D{\"u}sseldorf}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-1727}, pages = {147}, year = {2005}, abstract = {Ziel des Projektes ist die Entwicklung einer virtuellen Umgebung, die das charakteristische Klangabstrahlverhalten eines Musikinstruments in Echtzeit erfahrbar macht. Es wird eine virtuelle Umgebung geschaffen, in der sich der Benutzer frei um ein Musikinstrument bewegen kann und in Echtzeit ein akustisches und visuelles Feedback erh{\"a}lt. Durch die Verbindung der auditiven und visuellen Elemente und die M{\"o}glichkeit der Interaktion wird das Erleben und die Wahrnehmung der Effekte intensiviert. Die Simulation des charakteristischen Klangabstrahlverhaltens erfolgt nicht durch eine rechenaufw{\"a}ndige Klangsynthese wie z.B. Physical Modeling, sondern basiert auf der Lautst{\"a}rkeinterpolation einer Mehrkanalaufnahme. Die Verwendung der realen Aufnahmen erm{\"o}glicht eine ann{\"a}hernd naturgetreue Abbildung des Klangabstrahlverhaltens und ist, im Gegensatz zu rechenaufw{\"a}ndigen Klangsyntheseverfahren, echtzeitf{\"a}hig. Zus{\"a}tzlich wurde ein einfacher Filter entwickelt, der das charakteristische Klangabstrahlverhalten des Instruments eher qualitativ simuliert und sich problemlos in Echtzeit-3D-Anwendungen implementieren l{\"a}sst. Die beiden entwickelten Methoden zur Simulation des Klangabstrahlverhaltens wurden mittels Spektralanalyse und anhand eines durchgef{\"u}hrten H{\"o}rtests auf ihre Funktionalit{\"a}t und ihre G{\"u}ltigkeit {\"u}berpr{\"u}ft.}, language = {de} } @inproceedings{Herder1998, author = {Herder, Jens}, title = {Sound Spatialization Framework: An Audio Toolkit for Virtual Environments}, series = {First International Conference on Human and Computer, Aizu-Wakamatsu, September 1998}, booktitle = {First International Conference on Human and Computer, Aizu-Wakamatsu, September 1998}, address = {Aizu}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-788}, pages = {6}, year = {1998}, abstract = {The Sound Spatialization Framework is a C++ toolkit and development environment for providing advanced sound spatialization for virtual reality and multimedia applications. The Sound Spatialization Framework provides many powerful display and user-interface features not found in other sound spatialization software packages.
It provides facilities that go beyond simple sound source spatialization: visualization and editing of the soundscape, multiple sinks, clustering of sound sources, monitoring and controlling resource management, support for various spatialization backends, and classes for MIDI animation and handling.}, language = {en} } @inproceedings{Herder1997, author = {Herder, Jens}, title = {Tools and widgets for spatial sound authoring}, series = {CompuGraphics '97, Sixth International Conference on Computational Graphics and Visualization Techniques: Graphics in the Internet Age, Vilamoura, Portugal}, booktitle = {CompuGraphics '97, Sixth International Conference on Computational Graphics and Visualization Techniques: Graphics in the Internet Age, Vilamoura, Portugal}, address = {Portugal}, isbn = {972-8342-02-0}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-896}, pages = {87 -- 95}, year = {1997}, abstract = {Broader use of virtual reality environments and sophisticated animations spawn a need for spatial sound. Until now, spatial sound design has been based very much on experience and trial and error. Most effects are hand-crafted, because good design tools for spatial sound do not exist. This paper discusses spatial sound authoring and its applications, including shared virtual reality environments based on VRML. New utilities introduced by this research are an inspector for sound sources, an interactive resource manager, and a visual soundscape manipulator. The tools are part of a sound spatialization framework and allow a designer/author of multimedia content to monitor and debug sound events. Resource constraints like limited sound spatialization channels can also be simulated.}, language = {en} } @article{Herder1998, author = {Herder, Jens}, title = {Sound Spatialization Framework: An Audio Toolkit for Virtual Environments}, series = {Journal of the 3D-Forum Society}, volume = {12}, journal = {Journal of the 3D-Forum Society}, number = {3}, pages = {17 -- 22}, year = {1998}, abstract = {The Sound Spatialization Framework is a C++ toolkit and development environment for providing advanced sound spatialization for virtual reality and multimedia applications. The Sound Spatialization Framework provides many powerful display and user-interface features not found in other sound spatialization software packages. It provides facilities that go beyond simple sound source spatialization: visualization and editing of the soundscape, multiple sinks, clustering of sound sources, monitoring and controlling resource management, support for various spatialization backends, and classes for MIDI animation and handling. Keywords: sound spatialization, resource management, virtual environments, spatial sound authoring, user interface design, human-machine interfaces}, language = {en} } @inproceedings{HerderWilkeHeimbachetal.2009, author = {Herder, Jens and Wilke, Michael and Heimbach, Julia and G{\"o}bel, Sebastian and Marinos, Dionysios}, title = {Simple Actor Tracking for Virtual TV Studios Using a Photonic Mixing Device}, series = {12th International Conference on Human and Computer}, booktitle = {12th International Conference on Human and Computer}, address = {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf}, year = {2009}, abstract = {Virtual TV studios use actor tracking systems for resolving the occlusion between computer graphics and the studio camera image. The actor tracking delivers the distance between actor and studio camera.
We deploy a photonic mixing device, which captures a depth map and a luminance image at low resolution. The renderer engine gets one depth value per actor using the OSC protocol. We describe the actor recognition algorithm based on the luminance image and the depth value calculation. We discuss technical issues like noise and calibration.}, language = {en} } @inproceedings{GarbeHerbstHerder2007, author = {Garbe, Katharina and Herbst, Iris and Herder, Jens}, title = {Spatial Audio for Augmented Reality}, series = {10th International Conference on Human and Computer}, booktitle = {10th International Conference on Human and Computer}, address = {D{\"u}sseldorf, Aizu-Wakamatsu}, pages = {53 -- 58}, year = {2007}, abstract = {Using spatial audio successfully for augmented reality (AR) applications is a challenge, but is rewarded with an improved user experience. Thus, we have extended the AR/VR framework {\sc Morgan} with spatial audio to improve users' orientation in an AR application. In this paper, we investigate the users' capability to localize and memorize spatial sounds (registered with virtual or real objects). We discuss two scenarios. In the first scenario, the user localizes only sound sources, and in the second scenario, the user memorizes the location of audio-visual objects. Our results reflect spatial audio performance within the application domain and show which technology pitfalls still exist. Finally, we provide design recommendations for spatial audio AR environments.}, language = {en} } @article{vonBergSteffensWeinzierletal.2021, author = {von Berg, Markus and Steffens, Jochen and Weinzierl, Stefan and M{\"u}llensiefen, Daniel}, title = {Assessing room acoustic listening expertise}, series = {Journal of the Acoustical Society of America}, volume = {150}, journal = {Journal of the Acoustical Society of America}, number = {4}, publisher = {Acoustical Society of America}, doi = {10.1121/10.0006574}, pages = {2539 -- 2548}, year = {2021}, abstract = {Musicians and music professionals are often considered to be expert listeners for listening tests on room acoustics. However, these tests often target acoustic parameters other than those typically relevant in music, such as pitch, rhythm, amplitude, or timbre. To assess the expertise in perceiving and understanding room acoustical phenomena, a listening test battery was constructed to measure the perceptual sensitivity and cognitive abilities in the identification of rooms with different reverberation times and different spectral envelopes. Performance in these tests was related to data from the Goldsmiths Musical Sophistication Index, self-reported previous experience in music recording and acoustics, and academic knowledge on acoustics. The data from 102 participants show that sensory and cognitive abilities are both correlated significantly with musical training, analytic listening skills, recording experience, and academic knowledge on acoustics, whereas general interest in and engagement with music do not show any significant correlations. The regression models, using only significantly correlated criteria of musicality and professional expertise, explain only small to moderate amounts (11\%-28\%) of the variance in the "room acoustic listening expertise" across the different tasks of the battery.
Thus, the results suggest that the traditional criteria for selecting expert listeners in room acoustics are only weak predictors of their actual performances.}, language = {en} } @article{LuizardSteffensWeinzierl2020, author = {Luizard, Paul and Steffens, Jochen and Weinzierl, Stefan}, title = {Singing in different rooms: Common or individual adaptation patterns to the acoustic conditions?}, series = {The Journal of the Acoustical Society of America}, volume = {147}, journal = {The Journal of the Acoustical Society of America}, number = {2}, publisher = {ASA}, doi = {10.1121/10.0000715}, year = {2020}, language = {en} } @article{SteffensMuellerSchulzetal.2020, author = {Steffens, Jochen and M{\"u}ller, Franz and Schulz, Melanie and Gibson, Samuel}, title = {The effect of inattention and cognitive load on unpleasantness judgments of environmental sounds}, series = {Applied Acoustics}, volume = {164}, journal = {Applied Acoustics}, publisher = {Elsevier}, issn = {0003-682X}, doi = {10.1016/j.apacoust.2020.107278}, year = {2020}, language = {en} } @techreport{Vogel2020, author = {Vogel, Peter}, title = {Investieren unter Nebenbedingungen - Teil 4: Erg{\"a}nzung zu Teil 3}, publisher = {Hochschule D{\"u}sseldorf}, address = {D{\"u}sseldorf}, organization = {Hochschule D{\"u}sseldorf}, issn = {2567-2347}, doi = {10.20385/2567-2347/2020.4}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-21206}, year = {2020}, abstract = {Der vierte Teil der Schriftenreihe "Trading" pr{\"a}sentiert eine statistische Auswertung von Kursdaten bei einer Haltefrist von einem Handelstag. Die Statistik beinhaltet Gewinnfaktor bzw. Rendite und Investitionsgrad aus Teil 1 sowie Volatilit{\"a}t, Chance, Risiko und Handelskosten, welche in Teil 3 eingef{\"u}hrt wurden. Kaufbedingungen werden wie in Teil 1 mit Hilfe des Kursverh{\"a}ltnisses an zwei aufeinander folgenden Handelstagen gebildet. Die Auswertung f{\"u}r den Markt FEBRDUSA_1 historischer Kursdaten liefert eine in sich schl{\"u}ssige Beschreibung des Einflussfaktors Kaufbedingung auf die k{\"u}nftige Kursentwicklung. Den gr{\"o}ßten Gewinn liefert eine Mean-Reversion-Strategie, bei der die Chance deutlich {\"u}ber dem Risiko liegt. Da die Statistik die Begrenzung eingegangener Neuinvestitionen ber{\"u}cksichtigt, werden {\"U}bertreibungen vermieden, die einer arithmetischen Mittelwertbildung anhaften. Die statistische Auswertung f{\"u}r einen mit Random Walks gebildeten Markt best{\"a}tigt, dass f{\"u}r diesen Fall keine Abh{\"a}ngigkeit von der Kaufbedingung besteht.}, language = {de} } @techreport{VogelBlaettermann2021, author = {Vogel, Peter and Bl{\"a}ttermann, Patrick}, title = {Investieren unter Nebenbedingungen - Teil 5: Auswahl von Aktien, Datenqualit{\"a}t und Statistik-Ergebnisse}, address = {D{\"u}sseldorf}, organization = {Hochschule D{\"u}sseldorf}, issn = {2567-2347}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-28848}, year = {2021}, language = {de} } @article{HerderCohen2002, author = {Herder, Jens and Cohen, Michael}, title = {The Helical Keyboard: Perspectives for Spatial Auditory Displays and Visual Music}, series = {Journal of New Music Research}, volume = {31}, journal = {Journal of New Music Research}, number = {3}, pages = {269 -- 281}, year = {2002}, abstract = {Auditory displays with the ability to dynamically spatialize virtual sound sources under real-time conditions enable advanced applications for art and music. A listener can be deeply immersed while interacting and participating in the experience.
We review some of those applications while focusing on the Helical Keyboard project and discussing the required technology. Inspired by the cyclical nature of octaves and the helical structure of a scale, a model of a piano-style keyboard was prepared, which was then geometrically warped into a helicoidal configuration, one octave/revolution, pitch mapped to height and chroma. It can be driven by MIDI events, real-time or sequenced, a stream that is both synthesized and spatialized by a spatial sound display. The sound of the respective notes is spatialized with respect to sinks, avatars of the human user, by default in the tube of the helix. Alternative coloring schemes can be applied, including a color map compatible with chromastereoptic eyewear. The graphical display animates polygons, interpolating between the notes of a chord across the tube of the helix. Recognition of simple chords allows directionalization of all the notes of a major triad from the position of its musical root. The system is designed to allow, for instance, separate audition of harmony and melody, commonly played by the left and right hands, respectively, on a normal keyboard. Perhaps the most exotic feature of the interface is the ability to fork one's presence, replicating subject instead of object by installing multiple sinks at arbitrary places around a virtual scene so that, for example, harmony and melody can be separately spatialized, using two heads to normalize the octave; such a technique effectively doubles the helix from the perspective of a single listener. Rather than a symmetric arrangement of the individual helices, they are perceptually superimposed in-phase, co-extensively, so that corresponding notes in different registers are at the same azimuth.}, language = {en} } @article{HonnoSuzukiHerder2000, author = {Honno, Kuniaki and Suzuki, Kenji and Herder, Jens}, title = {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array}, series = {Journal of the 3D-Forum Society}, volume = {14}, journal = {Journal of the 3D-Forum Society}, number = {4}, pages = {146 -- 151}, year = {2000}, abstract = {The Pioneer Sound Field Controller (PSFC), a loudspeaker array system, features realtime configuration of an entire sound field, including sound source direction, virtual distance, and context of simulated environment (room characteristics: room size and liveness) for each of two sound sources. In the PSFC system, there is no native parameter to specify the distance between the sound source and sound sink (listener) and also no function to control it directly. This paper suggests a method to control virtual distance using basic parameters: volume, room size and liveness. The implementation of distance cues is an important aspect of 3D sounds. Virtual environments supporting room effects like reverberation not only gain realism but also provide additional information to users about surrounding space. The context switch of different aural attributes is done by using an API of the Sound Spatialization Framework.
Therefore, when the sound sink moves through two rooms, like a small bathroom and a large living room, the context of the sink switches and a different sound is obtained.}, language = {en} } @inproceedings{CohenHerderMartens2001, author = {Cohen, Michael and Herder, Jens and Martens, William}, title = {Panel: Eartop computing and cyberspatial audio technology}, series = {IEEE-VR2001: IEEE Virtual Reality}, booktitle = {IEEE-VR2001: IEEE Virtual Reality}, publisher = {IEEE}, address = {Yokohama}, isbn = {0-7695-0948-7}, pages = {322 -- 323}, year = {2001}, language = {en} } @article{HerderYamazaki2000, author = {Herder, Jens and Yamazaki, Yasuhiro}, title = {A Chatspace Deploying Spatial Audio for Enhanced Conferencing}, series = {Journal of the 3D-Forum Society}, volume = {15}, journal = {Journal of the 3D-Forum Society}, number = {1}, year = {2000}, language = {en} } @inproceedings{HonnoSuzukiHerder2000, author = {Honno, Kuniaki and Suzuki, Kenji and Herder, Jens}, title = {Distance and Room Effects Control for the PSFC, an Auditory Display using a Loudspeaker Array}, series = {Third International Conference on Human and Computer}, booktitle = {Third International Conference on Human and Computer}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, pages = {71 -- 76}, year = {2000}, abstract = {The Pioneer Sound Field Controller (PSFC), a loudspeaker array system, features realtime configuration of an entire sound field, including sound source direction, virtual distance, and context of simulated environment (room characteristics: room size and liveness) for each of two sound sources. In the PSFC system, there is no native parameter to specify the distance between the sound source and sound sink (listener) and also no function to control it directly. This paper suggests a method to control virtual distance using basic parameters: volume, room size and liveness. The implementation of distance cues is an important aspect of 3D sounds. Virtual environments supporting room effects like reverberation not only gain realism but also provide additional information to users about surrounding space. The context switch of different aural attributes is done by using an API of the Sound Spatialization Framework. Therefore, when the sound sink moves through two rooms, like a small bathroom and a large living room, the context of the sink switches and a different sound is obtained.}, language = {en} } @inproceedings{Herder1999, author = {Herder, Jens}, title = {Optimization of Sound Spatialization Resource Management through Clustering}, series = {Second International Conference on Human and Computer}, booktitle = {Second International Conference on Human and Computer}, address = {Aizu-Wakamatsu}, pages = {1 -- 7}, year = {1999}, abstract = {Level-of-detail is a concept well-known in computer graphics to reduce the number of rendered polygons. Depending on the distance to the subject (viewer), the objects' representation is changed. A similar concept is the clustering of sound sources for sound spatialization. Clusters can be used to hierarchically organize mixels and to optimize the use of resources by grouping multiple sources together into a single representative source. Such a clustering process should minimize the error of position allocation of elements, perceived as angle and distance, and also differences between velocity relative to the sink (i.e., Doppler shift).
Objects with similar direction of motion and speed (relative to sink) in the same acoustic resolution cone and with similar distance to a sink can be grouped together.}, language = {en} } @article{JensHerder1999, author = {Herder, Jens}, title = {Optimization of Sound Spatialization Resource Management through Clustering}, series = {Journal of the 3D-Forum Society}, volume = {13}, journal = {Journal of the 3D-Forum Society}, number = {3}, pages = {59 -- 65}, year = {1999}, abstract = {Level-of-detail is a concept well-known in computer graphics to reduce the number of rendered polygons. Depending on the distance to the subject (viewer), the objects' representation is changed. A similar concept is the clustering of sound sources for sound spatialization. Clusters can be used to hierarchically organize mixels and to optimize the use of resources by grouping multiple sources together into a single representative source. Such a clustering process should minimize the error of position allocation of elements, perceived as angle and distance, and also differences between velocity relative to the sink (i.e., Doppler shift). Objects with similar direction of motion and speed (relative to sink) in the same acoustic resolution cone and with similar distance to a sink can be grouped together.}, language = {en} } @misc{Herder2000, author = {Herder, Jens}, title = {Interactive Sound Spatialization - a Primer}, series = {MM News, University of Aizu Multimedia Center}, volume = {8}, journal = {MM News, University of Aizu Multimedia Center}, pages = {8 -- 12}, year = {2000}, abstract = {Sound spatialization is a technology which puts sound into the three-dimensional space, so that it has a perceivable direction and distance. Interactive means mutually or reciprocally active. Interaction is when one action (e.g., user moves mouse) has direct or immediate influence on other actions (e.g., processing by a computer: graphics change in size). Based on this definition, an introduction to sound reproduction using DVD and virtual environments is given and illustrated by applications (e.g., virtual concerts).}, language = {mul} } @article{Herder1999, author = {Herder, Jens}, title = {Visualization of a Clustering Algorithm of Sound Sources based on Localization Errors}, series = {Journal of the 3D-Forum Society}, volume = {13}, journal = {Journal of the 3D-Forum Society}, number = {3}, pages = {66 -- 70}, year = {1999}, abstract = {A module for soundscape monitoring and visualizing resource management processes was extended for presenting clusters, generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources together into a single representative source, considering localization errors depending on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done at runtime and allows understanding and evaluation of the clustering algorithm.}, language = {en} } @inproceedings{YamazakiHerder2000, author = {Yamazaki, Yasuhiro and Herder, Jens}, title = {Exploring Spatial Audio Conferencing Functionality in Multiuser Virtual Environments}, series = {The Third International Conference on Collaborative Virtual Environments}, booktitle = {The Third International Conference on Collaborative Virtual Environments}, publisher = {ACM}, address = {San Francisco}, pages = {207 -- 208}, year = {2000}, abstract = {A chatspace was developed that allows conversation with 3D sound using networked streaming in a shared virtual environment.
The system provides an interface to advanced audio features, such as a "whisper function" for conveying a confided audio stream. This study explores the use of spatial audio to enhance a user's experience in multiuser virtual environments.}, language = {en} } @inproceedings{Herder1999, author = {Herder, Jens}, title = {Visualization of a Clustering Algorithm of Sound Sources based on Localization Errors}, series = {Second International Conference on Human and Computer}, booktitle = {Second International Conference on Human and Computer}, address = {Aizu-Wakamatsu}, pages = {1 -- 5}, year = {1999}, abstract = {A module for soundscape monitoring and visualizing resource management processes was extended for presenting clusters, generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources together into a single representative source, considering localization errors depending on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done at runtime and allows understanding and evaluation of the clustering algorithm.}, language = {en} } @inproceedings{HerderYamazaki2000, author = {Herder, Jens and Yamazaki, Yasuhiro}, title = {A Chatspace Deploying Spatial Audio for Enhanced Conferencing}, series = {Third International Conference on Human and Computer}, booktitle = {Third International Conference on Human and Computer}, publisher = {University of Aizu}, address = {Aizu-Wakamatsu}, pages = {197 -- 202}, year = {2000}, language = {en} } @article{AmanoMatsushitaYanagawaetal.1998, author = {Amano, Katsumi and Matsushita, Fumio and Yanagawa, Hirofumi and Cohen, Michael and Herder, Jens and Martens, William and Koba, Yoshiharu and Tohyama, Mikio}, title = {A Virtual Reality Sound System Using Room-Related Transfer Functions Delivered Through a Multispeaker Array: the PSFC at the University of Aizu Multimedia Center}, series = {TVRSJ}, volume = {3}, journal = {TVRSJ}, number = {1}, publisher = {J-STAGE}, doi = {10.18974/tvrsj.3.1_1}, pages = {1 -- 12}, year = {1998}, abstract = {The PSFC, or Pioneer Sound Field Controller, is a DSP-driven hemispherical loudspeaker array, installed at the University of Aizu Multimedia Center. The PSFC features realtime manipulation of the primary components of sound spatialization for each of two audio sources located in a virtual environment, including the content (apparent direction and distance) and context (room characteristics: reverberation level, room size and liveness). In an alternate mode, it can also direct the destination of the two separate input signals across 14 loudspeakers, manipulating the direction of the virtual sound sources with no control over apparent distance other than that afforded by source loudness (including no simulated environmental reflections or reverberation). The PSFC speaker dome is about 10 m in diameter, accommodating about fifty simultaneous users, including about twenty users comfortably standing or sitting near its ``sweet spot,'' the area in which the illusions of sound spatialization are most vivid. Collocated with a large screen rear-projection stereographic display, the PSFC is intended for advanced multimedia and virtual reality applications.}, language = {en} } @inproceedings{MartensHerderShiba1999, author = {Martens, William L.
and Herder, Jens and Shiba, Yoshiki}, title = {A filtering model for efficient rendering of the spatial image of an occluded virtual sound source}, series = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, booktitle = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, publisher = {Acoustical Society of America, European Acoustics Association}, address = {Berlin}, year = {1999}, abstract = {Rendering realistic spatial sound imagery for complex virtual environments must take into account the effects of obstructions such as reflectors and occluders. It is relatively well understood how to calculate the acoustical consequence that would be observed at a given observation point when an acoustically opaque object occludes a sound source. But the interference patterns generated by occluders of various geometries and orientations relative to the virtual source and receiver are computationally intense if accurate results are required. In many applications, however, it is sufficient to create a spatial image that is recognizable by the human listener as the sound of an occluded source. In the interest of improving audio rendering efficiency, a simplified filtering model was developed and its audio output submitted to psychophysical evaluation. Two perceptually salient components of occluder acoustics were identified that could be directly related to the geometry and orientation of a simple occluder. Actual occluder impulse responses measured in an anechoic chamber resembled the responses of a model incorporating only a variable-duration delay line and a low-pass filter with variable cutoff frequency.}, language = {en} } @inproceedings{MartensHerder1999, author = {Martens, William L. and Herder, Jens}, title = {Perceptual criteria for eliminating reflectors and occluders from the rendering of environmental sound}, series = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, booktitle = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association}, publisher = {Acoustical Society of America, European Acoustics Association}, address = {Berlin}, year = {1999}, abstract = {Given limited computational resources available for the rendering of spatial sound imagery, we seek to determine effective means for choosing what components of the rendering will provide the most audible differences in the results. Rather than begin with an analytic approach that attempts to predict audible differences on the basis of objective parameters, we chose to begin with subjective tests of how audibly different the rendering result may be heard to be when that result includes two types of sound obstruction: reflectors and occluders. Single-channel recordings of 90 short speech sounds were made in an anechoic chamber in the presence and absence of these two types of obstructions, and as the angle of those obstructions varied over a 90 degree range. These recordings were reproduced over a single loudspeaker in that anechoic chamber, and listeners were asked to rate how confident they were that the recording of each of these 90 stimuli included an obstruction.
These confidence ratings can be used as an integral component in the evaluation function used to determine which reflectors and occluders are most important for rendering.}, language = {en} } @inproceedings{IshikawaHiroseHerder1998, author = {Ishikawa, Kimitaka and Hirose, Minefumi and Herder, Jens}, title = {A Sound Spatialization Server for a Speaker Array as an Integrated Part of a Virtual Environment}, series = {IEEE YUFORIC Germany 98}, booktitle = {IEEE YUFORIC Germany 98}, publisher = {IEEE}, address = {Stuttgart}, year = {1998}, abstract = {Spatial sound plays an important role in virtual reality environments, allowing orientation in space, giving a feeling of space, focusing the user on events in the scene, and substituting missing feedback cues (e.g., force feedback). The sound spatialization framework of the University of Aizu, which supports a number of spatialization backends, has been extended to include a sound spatialization server for a multichannel loudspeaker array (Pioneer Sound Field Control System). Our goal is that the spatialization server allows easy integration into virtual environments. Modeling of distance cues, which are essential for full immersion, is discussed. Furthermore, the integration of this prototype into different applications allowed us to reveal the advantages and problems of spatial sound for virtual reality environments.}, language = {en} } @article{CohenHerderLMartens1999, author = {Cohen, Michael and Herder, Jens and Martens, William L.}, title = {Cyberspatial Audio Technology}, series = {The Journal of the Acoustical Society of Japan (E)}, volume = {20}, journal = {The Journal of the Acoustical Society of Japan (E)}, number = {6}, doi = {10.1250/ast.20.389}, pages = {389 -- 395}, year = {1999}, abstract = {Cyberspatial audio applications are distinguished from the broad range of spatial audio applications in a number of important ways that help to focus this review. Most significant is that cyberspatial audio is most often designed to be responsive to user inputs. In contrast to non-interactive auditory displays, cyberspatial auditory displays typically allow active exploration of the virtual environment in which users find themselves. Thus, at least some portion of the audio presented in a cyberspatial environment must be selected, processed, or otherwise rendered with minimum delay relative to user input.
Besides the technological demands associated with realtime delivery of spatialized sound, the type and quality of auditory experiences supported are also very different from those associated with displays that support stationary sound localization.}, language = {en} } @incollection{CohenHerder1998, author = {Cohen, Michael and Herder, Jens}, title = {Symbolic representations of exclude and include for audio sources and sinks: Figurative suggestions of mute/solo \& cue and deafen/confide \& harken}, series = {Virtual Environments '98, Proceedings of the Eurographics Workshop}, booktitle = {Virtual Environments '98, Proceedings of the Eurographics Workshop}, editor = {G{\"o}bel, Martin and Landauer, J{\"u}rgen and Lang, Ulrich and Wapler, Matthias}, publisher = {Springer-Verlag}, address = {Stuttgart}, isbn = {3-211-83233-5}, doi = {10.1007/978-3-7091-7519-4_23}, pages = {235 -- 242}, year = {1998}, language = {en} } @article{Herder1998, author = {Herder, Jens}, title = {Tools and Widgets for Spatial Sound Authoring}, series = {Computer Networks \& ISDN Systems}, volume = {30}, journal = {Computer Networks \& ISDN Systems}, number = {20-21}, publisher = {Elsevier}, pages = {1933 -- 1940}, year = {1998}, language = {en} } @inproceedings{HerderCohen1997, author = {Herder, Jens and Cohen, Michael}, title = {Sound Spatialization Resource Management in Virtual Reality Environments}, series = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education}, booktitle = {ASVA'97 -- Int. Symp. on Simulation, Visualization and Auralization for Acoustic Research and Education}, address = {Tokyo}, pages = {407 -- 414}, year = {1997}, abstract = {In a virtual reality environment, users are immersed in a scene with objects which might produce sound. The responsibility of a VR environment is to present these objects, but a system has only limited resources, including spatialization channels (mixels), MIDI/audio channels, and processing power. The sound spatialization resource manager controls sound resources and optimizes fidelity (presence) under given conditions. For that, a priority scheme based on human psychophysical hearing is needed. Parameters for spatialization priorities include intensity calculated from volume and distance, orientation in the case of non-uniform radiation patterns, occluding objects, frequency spectrum (low frequencies are harder to localize), expected activity, and others. Objects which are spatially close together (depending on distance and direction) can be mixed. Sources that cannot be spatialized can be treated as a single ambient sound source. Important for resource management is the resource assignment, i.e., minimizing swap operations, which makes it desirable to look ahead and predict upcoming events in a scene. Prediction is achieved by monitoring objects' speed and past evaluation values. Fidelity is contrasted for different kinds of resource restrictions and optimal resource assignment based upon unlimited dynamic scene look-ahead. To give standard and comparable results, the VRML 2.0 specification is used as an application programmer interface. Applicability is demonstrated with a helical keyboard, a polyphonic MIDI-stream-driven animation including user interaction (user moves around, playing together with programmed notes). The developed sound spatialization resource manager gives improved spatialization fidelity under runtime constraints.
Application programmers and virtual reality scene designers are freed from the burden of assigning and predicting the sound sources.}, language = {en} } @inproceedings{AmanoMatsushitaYanagawaetal.1996, author = {Amano, Katsumi and Matsushita, Fumio and Yanagawa, Hirofumi and Cohen, Michael and Herder, Jens and Koba, Yoshiharu and Tohyama, Mikio}, title = {The Pioneer sound field control system at the University of Aizu Multimedia Center}, series = {RO-MAN '96 Tsukuba}, booktitle = {RO-MAN '96 Tsukuba}, publisher = {IEEE}, address = {Piscataway}, isbn = {0-7803-3253-9}, doi = {10.1109/ROMAN.1996.568887}, pages = {495 -- 499}, year = {1996}, abstract = {The PSFC, or Pioneer sound field control system, is a DSP-driven hemispherical 14-loudspeaker array, installed at the University of Aizu Multimedia Center. Collocated with a large screen rear-projection stereographic display, the PSFC features realtime control of virtual room characteristics and direction of two separate sound channels, smoothly steering them around a configurable soundscape. The PSFC controls an entire sound field, including sound direction, virtual distance, and simulated environment (reverb level, room size and liveness) for each source. It can also configure a dry (DSP-less) switching matrix for direct directionalization. The PSFC speaker dome is about 14 m in diameter, allowing about twenty users at once to comfortably stand or sit near its sweet spot.}, language = {en} } @article{VersuemerSteffensBlaettermannetal.2020, author = {Vers{\"u}mer, Siegbert and Steffens, Jochen and Bl{\"a}ttermann, Patrick and Becker-Schweitzer, J{\"o}rg}, title = {Modelling evaluations of low-level sounds in everyday situations using linear machine learning for variable selection}, series = {Frontiers in Psychology}, volume = {11}, journal = {Frontiers in Psychology}, publisher = {Frontiers}, issn = {1664-1078}, doi = {10.3389/fpsyg.2020.570761}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-23779}, year = {2020}, language = {en} } @inproceedings{WilczekSteffensWeinzierl2020, author = {Wilczek, Tobias and Steffens, Jochen and Weinzierl, Stefan}, title = {Zum Einfluss der Akustik auf die Qualit{\"a}t von Restaurants. Eine Feldstudie}, series = {Fortschritte der Akustik - DAGA 2020: 46. Deutsche Jahrestagung f{\"u}r Akustik}, booktitle = {Fortschritte der Akustik - DAGA 2020: 46.
Deutsche Jahrestagung f{\"u}r Akustik}, publisher = {Deutsche Gesellschaft f{\"u}r Akustik e.V.}, address = {Berlin}, doi = {10.14279/depositonce-9999}, pages = {854 -- 857}, year = {2020}, language = {de} } @article{AngladaTortKellerSteffensetal.2020, author = {Anglada-Tort, Manuel and Keller, Steve and Steffens, Jochen and M{\"u}llensiefen, Daniel}, title = {The Impact of Source Effects on the Evaluation of Music for Advertising}, series = {Journal of Advertising Research}, volume = {60}, journal = {Journal of Advertising Research}, number = {3}, publisher = {ARF}, doi = {10.2501/JAR-2020-016}, year = {2020}, language = {en} } @incollection{HerderNovotny2003, author = {Herder, Jens and Novotny, Thomas}, title = {Spatial Sound Design and Interaction for Virtual Environments in the Promotion of Architectural Designs}, series = {Third International Workshop on Spatial Media}, booktitle = {Third International Workshop on Spatial Media}, address = {Aizu-Wakamatsu}, pages = {7 -- 11}, year = {2003}, abstract = {Virtual environment walkthrough applications are generally enhanced by a user's interactions within a simulated architectural space, but the enhancement that stems from changes in spatial sound that are coupled with a user's behavior is particularly important, especially with regard to creating a sense of place. When accompanied by stereoscopic image synthesis, spatial sound can immerse the user in a high-realism virtual copy of the real world. An advanced virtual environment that allows users to change realtime rendering features with a few manipulations has been shown to enable switching between different versions of a modeled space while maintaining sensory immersion. This paper reports on an experimental project in which an architectural model is being integrated into such an interactive virtual environment. The focus is on the spatial sound design for supporting interaction, including demonstrations of both the possibilities and limitations of such applications in presenting and promoting architectural designs, as well as in three-dimensional sketching.}, language = {en} } @article{StruchholzHerderLeckschat2006, author = {Struchholz, Holger and Herder, Jens and Leckschat, Dieter}, title = {Sound radiation simulation of musical instruments based on interpolation and filtering of multi-channel recordings}, series = {Journal of the 3D-Forum Society}, volume = {20}, journal = {Journal of the 3D-Forum Society}, number = {1}, pages = {41 -- 47}, year = {2006}, abstract = {With the virtual environment developed here, the characteristic sound radiation patterns of musical instruments can be experienced in real-time. The user may freely move around a musical instrument, thereby receiving acoustic and visual feedback in real-time. The perception of auditory and visual effects is intensified by the combination of acoustic and visual elements, as well as the option of user interaction. The simulation of characteristic sound radiation patterns is based on interpolating the intensities of a multichannel recording and offers a near-natural mapping of the sound radiation patterns. Additionally, a simple filter has been developed, enabling the qualitative simulation of an instrument's characteristic sound radiation patterns to be easily implemented within real-time 3D applications.
Both methods of simulating sound radiation patterns have been evaluated for a saxophone with respect to their functionality and validity by means of spectral analysis and an auditory experiment.}, language = {en} } @inproceedings{BoeldtSteffensBuettneretal.2020, author = {B{\"o}ldt, Sebastian and Steffens, Jochen and B{\"u}ttner, Clemens and Weinzierl, Stefan}, title = {Modellierung von Publikumsger{\"a}uschen bei Veranstaltungen mit Sprache und Musik}, series = {Fortschritte der Akustik - DAGA 2020: 46. Deutsche Jahrestagung f{\"u}r Akustik}, booktitle = {Fortschritte der Akustik - DAGA 2020: 46. Deutsche Jahrestagung f{\"u}r Akustik}, publisher = {Deutsche Gesellschaft f{\"u}r Akustik e.V.}, address = {Berlin}, organization = {Deutsche Gesellschaft f{\"u}r Akustik e.V.}, doi = {10.14279/depositonce-9991}, pages = {850 -- 853}, year = {2020}, language = {de} } @article{BehbehaniSteffens2021, author = {Behbehani, Sami and Steffens, Jochen}, title = {Musical DIAMONDS: The influence of situational classes and characteristics on music listening behavior}, series = {Psychology of Music}, volume = {49}, journal = {Psychology of Music}, number = {6}, publisher = {SAGE}, issn = {1741-3087}, doi = {10.1177/0305735620968910}, pages = {1532 -- 1545}, year = {2021}, language = {en} } @inproceedings{KrauseBakerJGroarkeetal.2021, author = {Krause, Amanda and Baker, David J. and Groarke, Jenny and Pereira, Ana I. and Liew, Kongmeng and Anglada-Tort, Manuel and Steffens, Jochen}, title = {A global investigation of music listening practices: The influence of country latitude and seasons on music preferences}, series = {ICMPC-ESCOM 2021: 16th International Conference on Music Perception and Cognition/11th Triennial conference of the European Society for the Cognitive Sciences of Music, 28-31 July 2021, Sheffield}, booktitle = {ICMPC-ESCOM 2021: 16th International Conference on Music Perception and Cognition/11th Triennial conference of the European Society for the Cognitive Sciences of Music, 28-31 July 2021, Sheffield}, address = {Sheffield}, year = {2021}, language = {en} } @incollection{EgermannLepaHerzogetal.2022, author = {Egermann, Hauke and Lepa, Steffen and Herzog, Martin and Steffens, Jochen}, title = {Evidenzbasierte Praxis im Music Branding - Musikalische Markenkommunikation wird erwachsen}, series = {Musik \& Marken}, booktitle = {Musik \& Marken}, editor = {Gr{\"u}newald-Schukalla, Lorenz and J{\´o}ri, Anita and Schwetter, Holger}, publisher = {Springer Fachmedien}, address = {Wiesbaden}, pages = {73 -- 91}, year = {2022}, language = {de} } @incollection{RosenthalVersuemerSteffens2022, author = {Rosenthal, Fabian and Vers{\"u}mer, Siegbert and Steffens, Jochen}, title = {Audioinhaltsanalyse und Multilevelmodellierung zur Vorhersage der Bewertung von Indoor Soundscapes}, series = {Deutsche Jahrestagung f{\"u}r Akustik (DAGA)}, booktitle = {Deutsche Jahrestagung f{\"u}r Akustik (DAGA)}, address = {Stuttgart}, year = {2022}, language = {de} } @inproceedings{VersuemerSteffensBlaettermann2022, author = {Vers{\"u}mer, Siegbert and Steffens, Jochen and Bl{\"a}ttermann, Patrick}, title = {Subjektive Lautheitsbewertung unter Einfluss situativer und personenbezogener Faktoren}, series = {Deutsche Jahrestagung f{\"u}r Akustik (DAGA)}, booktitle = {Deutsche Jahrestagung f{\"u}r Akustik (DAGA)}, address = {Stuttgart}, year = {2022}, language = {de} } @inproceedings{SteffensHimmelein2022, author = {Steffens, Jochen and Himmelein, Hendrik}, title = {Induced cognitive load influences unpleasantness
judgments of modulated noise}, series = {Proceedings of the 24th International Congress on Acoustics}, booktitle = {Proceedings of the 24th International Congress on Acoustics}, address = {Gyeongju, South Korea}, year = {2022}, language = {en} } @inproceedings{vonBergPrinzSteffens2022, author = {von Berg, Markus and Prinz, Lukas and Steffens, Jochen}, title = {Comparing individual perception of timbre and reverberance}, address = {Gyeongju, South Korea}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-38706}, pages = {5}, year = {2022}, abstract = {Room reverberation alters the spatial impression and timbre of a sound by modulating its spectral and temporal characteristics. Thus, we argue that, on a perceptual level, reverberation basically breaks down into interaural differences and spectro-temporal cues and that the separation of a perceived timbre into a sound source and a surrounding room is a purely cognitive process. To investigate the connection between the perception of reverberation cues and timbre analysis, the sensitivity for changes in reverberation was compared to timbre perception abilities. The Timbre Perception Test was used to measure the perception of the temporal envelope, spectral centroid, and spectral flux of artificial sounds. Sensitivity for changes in reverberation time was tested with a discrimination task using speech and noise with speech-alike spectral and temporal envelopes as source signals. Musical and acoustical expertise was assessed through the Goldsmiths Musical Sophistication Index and self-reports on experience with and knowledge of acoustics. There was a considerable correlation between timbre and reverberance perception ability, but timbre perception and academic experience predicted only 41\% of the variance in reverberance perception. Still, perception abilities related to similar acoustical phenomena seem to be better indicators of listening skills than self-reports on acoustical or musical expertise.}, language = {en} } @inproceedings{HimmeleinSteffens2022, author = {Himmelein, Hendrik and Steffens, Jochen}, title = {"Ich sehe, was Du h{\"o}rst!" - Verwendung der Pupillometrie zur Messung des Einflusses kognitiver Belastung auf die Ger{\"a}uschbewertung}, series = {Deutsche Jahrestagung f{\"u}r Akustik (DAGA), Stuttgart}, booktitle = {Deutsche Jahrestagung f{\"u}r Akustik (DAGA), Stuttgart}, address = {Stuttgart}, year = {2022}, language = {de} } @inproceedings{LudwigBuechelHerderetal.2012, author = {Ludwig, Philipp and B{\"u}chel, Joachim and Herder, Jens and Vonolfen, Wolfgang}, title = {InEarGuide - A Navigation and Interaction Feedback System using In Ear Headphones for Virtual TV Studio Productions}, series = {9. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR}, booktitle = {9. Workshop Virtuelle und Erweiterte Realit{\"a}t der GI-Fachgruppe VR/AR}, address = {D{\"u}sseldorf}, year = {2012}, abstract = {This paper presents an approach to integrate non-visual user feedback in today's virtual TV studio productions. Since recent studies showed that systems providing vibro-tactile feedback are not sufficient for replacing the common visual feedback, we developed an audio-based solution using an in-ear headphone system, enabling a talent to move, avoid and point to virtual objects in a blue or green box. The system consists of an optical head tracking system, a wireless in-ear monitor system and a workstation, which performs all application and audio processing.
Using head-related transfer functions, the talent gets directional and distance cues. Since past research showed that generating reflections of the sounds and simulating the acoustics of the virtual room helps the listener to conceive the acoustical feedback, we included this technique as well. In a user study with 15 participants, the performance of the system was evaluated.}, language = {en} } @article{HerzogLepaEgermannetal.2020, author = {Herzog, Martin and Lepa, Steffen and Egermann, Hauke and Schoenrock, Andreas and Steffens, Jochen}, title = {Towards a common terminology for music branding campaigns}, series = {Journal of Marketing Management}, volume = {36}, journal = {Journal of Marketing Management}, number = {1-2}, publisher = {Taylor \& Francis}, doi = {10.1080/0267257X.2020.1713856}, pages = {176 -- 209}, year = {2020}, language = {en} } @article{IrrgangSteffensEgermann2020, author = {Irrgang, Melanie and Steffens, Jochen and Egermann, Hauke}, title = {From acceleration to rhythmicity: Smartphone-assessed movement predicts properties of music}, series = {Journal of New Music Research}, volume = {4}, journal = {Journal of New Music Research}, number = {7}, publisher = {Taylor \& Francis}, doi = {10.1080/09298215.2020.1715447}, pages = {1 -- 14}, year = {2020}, language = {en} } @inproceedings{vonBergSchwoererPrinzetal.2023, author = {von Berg, Markus and Schw{\"o}rer, Paul and Prinz, Lukas and Steffens, Jochen}, title = {Analysis of physical and perceptual properties of room impulse responses: development of an online tool}, series = {Forum Acusticum 2023: 10th Convention of European Acoustics Association, Turin, Italy, 11th-15th September 2023}, booktitle = {Forum Acusticum 2023: 10th Convention of European Acoustics Association, Turin, Italy, 11th-15th September 2023}, publisher = {EAA}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-41955}, pages = {8}, year = {2023}, abstract = {Over the past decades, research in room acoustics has established several derivative measures of an impulse response, some of which are incorporated in the ISO 3382 standards. These parameters intend to represent perceptual qualities, but were developed without a consistent modeling of room acoustical perception. More recent research proposed comprehensive inventories of room acoustic perception that are purely based on evaluations by human subjects, such as the Room Acoustical Quality Index (RAQI). In this work, RAQI scores acquired for 70 room impulse responses were predicted from room acoustical parameters. Except for Reverberance, the prediction of RAQI factors performed rather poorly. In most cases, the sound source had a greater impact on RAQI scores. All analyses are published in an online tool, where users can upload omnidirectional and binaural impulse responses, and instantly obtain and visualize several physical descriptors, as well as predicted RAQI scores for three different sound sources. So far, acceptable prediction accuracy is achieved for Reverberance, Strength, Irregular Decay, Clarity and Intimacy. Larger data sets of evaluated impulse responses are required to improve the model performance and enable reliable predictions of room acoustical quality. Therefore, the administration of RAQI evaluations within the website is currently being developed.}, language = {en} } @article{SteffensWilczekWeinzierl2021, author = {Steffens, Jochen and Wilczek, Tobias and Weinzierl, Stefan}, title = {Junk Food or Haute Cuisine to the Ear?
- Investigating the Relationship Between Room Acoustics, Soundscape, Non-Acoustical Factors, and the Perceived Quality of Restaurants}, series = {Frontiers in Built Environment}, volume = {676009}, journal = {Frontiers in Built Environment}, publisher = {Frontiers}, issn = {2297-3362}, doi = {10.3389/fbuil.2021.676009}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-32115}, pages = {11}, year = {2021}, abstract = {Sound and music are well-studied aspects of the quality of experience in restaurants; the role of the room acoustical conditions, their influence on the visitors' soundscape evaluation, and their impact on the overall customer satisfaction in restaurants, however, have received less scientific attention. The present field study therefore investigated whether sound pressure level, reverberation time, and soundscape pleasantness can predict factors associated with overall restaurant quality. In total, 142 persons visiting 12 restaurants in Berlin rated relevant acoustical and non-acoustical factors associated with restaurant quality. Simultaneously, the A-weighted sound pressure level (LA,eq,15) was measured, and the reverberation time in the occupied state (T20,occ) was obtained by measurements performed in the unoccupied room and a subsequent calculation of the occupied condition according to DIN 18041. Results from linear mixed-effects models revealed that both the LA,eq,15 and T20,occ had a significant influence on soundscape pleasantness and eventfulness, whereby the effect of T20,occ was mediated by the LA,eq,15. Also, the LA,eq,15 as well as soundscape pleasantness were significant predictors of overall restaurant quality. A comprehensive structural equation model including both acoustical and non-acoustical factors, however, indicates that the effect of soundscape pleasantness on overall restaurant quality is mediated by the restaurant's atmosphere. Our results support and extend previous findings which suggest that the acoustical design of restaurants involves a trade-off between comfort and liveliness, depending on the desired character of the place.}, language = {en} } @misc{VersuemerSteffensRosenthal2023, author = {Vers{\"u}mer, Siegbert and Steffens, Jochen and Rosenthal, Fabian}, title = {Extensive crowdsourced dataset of in-situ evaluated binaural soundscapes of private dwellings containing subjective sound-related and situational ratings along with person factors to study time-varying influences on sound perception - research data}, edition = {V.01.1}, address = {Zenodo}, doi = {10.5281/zenodo.7193937}, year = {2023}, abstract = {The soundscape approach highlights the role of situational factors in sound evaluations; however, only a few studies have applied a multi-domain approach including sound-related, person-related, and time-varying situational variables. Therefore, we conducted a study based on the Experience Sampling Method to measure the relative contribution of a broad range of potentially relevant acoustic and non-auditory variables in predicting indoor soundscape evaluations. Here we present the comprehensive dataset for which 105 participants reported temporally (rather) stable trait variables such as noise sensitivity, trait affect, and quality of life. They rated 6,594 situations regarding the soundscape standard dimensions, perceived loudness, and the saliency of its sound components and evaluated situational variables such as state affect, perceived control, activity, and location.
To complement these subject-centered data, we additionally crowdsourced object-centered data by having participants make binaural measurements of each indoor soundscape at their homes using a low-(self-)noise recorder. These recordings were used to compute (psycho-)acoustical indices such as the energetically averaged loudness level, the A-weighted energetically averaged equivalent continuous sound pressure level, and the A-weighted five-percent exceedance level. These complex hierarchical data can be used to investigate time-varying non-auditory influences on sound perception and to develop soundscape indicators based on the binaural recordings to predict soundscape evaluations.}, subject = {soundscape}, language = {en} } @masterthesis{Rosenthal2022, type = {Bachelor Thesis}, author = {Rosenthal, Fabian}, title = {Einsatz und Anpassung von Methoden der Audio-Feature-Extraktion am Beispiel von Indoor Soundscapes}, address = {D{\"u}sseldorf}, organization = {Hochschule D{\"u}sseldorf}, doi = {10.20385/opus4-4399}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-43991}, school = {Hochschule D{\"u}sseldorf}, pages = {77}, year = {2022}, abstract = {Im h{\"a}uslichen Umfeld erleben Menschen abh{\"a}ngig von ihrer Wohnsituation unterschiedlichste Ger{\"a}uschumgebungen (Indoor Soundscapes), die einen hohen Einfluss auf das Wohlbefinden haben. Im Rahmen einer Feldstudie wurde daher die theoretisch und praktisch relevante Frage untersucht, welche Merkmale der wahrgenommenen Ger{\"a}uschumgebungen als besonders ereignisreich oder angenehm bewertet werden. Einhundertf{\"u}nf Teilnehmer*innen berichteten nach der Experience-Sampling-Methode zeitgesteuert {\"u}ber auftretende Ger{\"a}uschumgebungen des h{\"a}uslichen Alltags. Sie bewerteten deren subjektive Wirkung gem{\"a}ß Soundscape-Standard. Zudem fertigten sie in-situ Audioaufnahmen der Ger{\"a}uschszenarien an. Die 6594 Tonaufnahmen wurden einer Audioinhaltsanalyse unterzogen und im Zuge dessen wurden vier Featuresets verschiedener Berechnungsans{\"a}tze extrahiert. Mithilfe der perzentilen LASSO-Regularisierung wurden lineare gemischte Modelle f{\"u}r Angenehmheit und Ereignisreichtum sowie drei Multilevel-Modelle f{\"u}r Angenehmheit aufgestellt. Die besten Modelle erkl{\"a}ren 9 \% der Varianz von Angenehmheit und 27 \% der Varianz von Ereignisreichtum durch feste Effekte selektierter Pr{\"a}diktoren aller getesteten Featuresets. Angenehmheit sinkt vor allem, wenn lautheitsbasierte Features hohe Werte zeigen. Ereignisreichtum ist am st{\"a}rksten abh{\"a}ngig von kurzen Spitzen des C-bewerteten Schalldruckpegels und wird im Vergleich zu Angenehmheit st{\"a}rker von Zeitschwankungen der Features bestimmt. Durch den Vergleich der Featuresets wird deutlich, dass die Modelleffekte bekannter psychoakustischer Gr{\"o}ßen durch Hinzuf{\"u}gen von MFCC-Features verbessert werden. Dar{\"u}ber hinaus wird anhand von Multilevel-Modellen gezeigt, dass die Angenehmheit des lautesten Viertels der Tonaufnahmen deutlich besser durch feste Effekte erkl{\"a}rbar ist als der leisere Rest des Datensatzes. Kongruent mit aktuellen Befragungsstudien kann durch Audiofeatures eine Kategorienabh{\"a}ngigkeit der Soundscape-Bewertungen belegt werden: Musik und Sprache werden als angenehmer bewertet, technische Ger{\"a}usche und Anlagenrauschen hingegen als unangenehmer. Hoher Ereignisreichtum h{\"a}ngt mit menschgemachten Ger{\"a}uschen (z. B. Poltern, Stuhlr{\"u}cken) zusammen.
@incollection{LoepthienHantschelRuthetal.2022, author = {Loepthien, Tim and Hantschel, Florian and Ruth, Nicolas and Steffens, Jochen and Shumaker, Randall}, title = {Flow-Erleben beim Musikh{\"o}ren: Die Wichtigkeit situativer und personenbezogener Variablen und Zusammenh{\"a}nge mit der aktuellen Stimmung}, series = {Abstract-Band zur 38. Jahrestagung der Deutschen Gesellschaft f{\"u}r Musikpsychologie}, booktitle = {Abstract-Band zur 38. Jahrestagung der Deutschen Gesellschaft f{\"u}r Musikpsychologie}, address = {W{\"u}rzburg}, organization = {DGM}, year = {2022}, language = {de} } @article{AngladaTortMastersSteffensetal.2022, author = {Anglada-Tort, Manuel and Masters, Nikhil and Steffens, Jochen and North, Adrian and M{\"u}llensiefen, Daniel}, title = {The Behavioural Economics of Music: Systematic review and future directions}, series = {Quarterly Journal of Experimental Psychology}, volume = {76}, journal = {Quarterly Journal of Experimental Psychology}, number = {5}, publisher = {SAGE}, isbn = {1747-0218}, issn = {1747-0226}, doi = {10.1177/17470218221113761}, year = {2022}, abstract = {Music-related decision-making encompasses a wide range of behaviours, including those associated with listening choices, composition and performance, and decisions involving music education and therapy. Although research programmes in psychology and economics have contributed to an improved understanding of music-related behaviour, historically these disciplines have been unconnected. Recently, however, researchers have begun to bridge this gap by employing tools from behavioural economics. This article contributes to the literature by discussing the benefits of using behavioural economics in music-decision research. We achieve this in two ways. First, through a systematic review, we identify the current state of the literature within four key areas of behavioural economics: heuristics and biases, social decision-making, behavioural time preferences, and dual-process theory. Second, taking the findings of the literature as a starting point, we demonstrate how behavioural economics can inform future research. Based on this, we propose the Behavioural Economics of Music (BEM), an integrated research programme that aims to break new ground by stimulating interdisciplinary research at the intersection of music, psychology, and economics.}, language = {en} }
@inproceedings{BrettschneiderHerderdeMooijetal.2019, author = {Brettschneider, Nico and Herder, Jens and de Mooij, Jeroen and Ryskeldiev, Bektur}, title = {Audio vs. Visual Avatars as Guides in Virtual Environments}, series = {21st International Conference on Human and Computer, HC-2018, March 27-28, 2019, Shizuoka University, Hamamatsu, Japan}, booktitle = {21st International Conference on Human and Computer, HC-2018, March 27-28, 2019, Shizuoka University, Hamamatsu, Japan}, editor = {Herder, Jens}, publisher = {Hochschule D{\"u}sseldorf}, address = {D{\"u}sseldorf}, organization = {Hochschule D{\"u}sseldorf}, doi = {10.20385/0hrj-qc02}, url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-23859}, pages = {9}, year = {2019}, abstract = {Through constant technical progress, multi-user virtual reality is transforming into a social activity that is no longer limited to remote users but also takes place in large-scale location-based experiences. We evaluate the use of real-time-tracked avatars in co-located, business-oriented applications in a "guide-user scenario", compared with audio-only instructions. The present study examined the effect of an avatar guide on the user-related factors of Spatial Presence, Social Presence, User Experience, and Task Load in order to propose design guidelines for co-located collaborative immersive virtual environments. To this end, an application was developed and a user study with 40 participants was conducted, comparing the two guiding techniques, a real-time-tracked avatar guide and a non-visualised guide, under otherwise constant conditions. Results reveal that the avatar guide enhanced and stimulated communicative processes while facilitating interaction possibilities and creating a higher sense of mental immersion for users. Furthermore, the avatar guide appeared to make the storyline more engaging and exciting while helping users adapt to the medium of virtual reality. Even though no assertion could be made concerning the Task Load factor, the avatar guide achieved a higher subjective rating on User Experience. Based on these results, avatars can be considered valuable social elements in the design of future co-located collaborative virtual environments.}, language = {en} }
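The study above compares two guiding techniques between groups on questionnaire factors. Purely as an illustration of such a between-groups comparison (the abstract does not name the statistical test used, and the scores below are invented stand-ins, not study data), a Welch's t-test in Python:

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    # Invented 7-point User Experience scores for the avatar-guide
    # and audio-only groups (20 participants each).
    avatar_scores = rng.normal(5.4, 0.8, 20)
    audio_scores = rng.normal(4.8, 0.9, 20)

    # Welch's t-test compares two independent groups without
    # assuming equal variances.
    t, p = stats.ttest_ind(avatar_scores, audio_scores, equal_var=False)
    print(f"t = {t:.2f}, p = {p:.3f}")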
@misc{vonBergSchwoererPrinzetal.2023, author = {von Berg, Markus and Schw{\"o}rer, Paul and Prinz, Lukas and Steffens, Jochen}, title = {OPRA: Akustische Analyse musikalischer Auff{\"u}hrungsr{\"a}ume im Browser}, series = {Akustik Journal}, journal = {Akustik Journal}, number = {3}, issn = {2569-1600}, pages = {41 -- 52}, year = {2023}, abstract = {The performance space contributes substantially to the experience of musical performances. Numerous measures have become established for describing the acoustics of performance spaces, each intended to capture particular perceptual qualities such as reverberance. More recent research contrasts these with purely perceptual measurement inventories that describe the perceptual impression as such, detached from its physical origin. This article describes an attempt to bring the two research strands together by modelling ratings of performance spaces on the perceptual Room Acoustical Quality Index (RAQI) with physical measures derived from simple impulse response measurements. To this end, a selection of room-acoustical measures was reduced to five dimensions, which were tested as predictors of the individual RAQI perceptual factors. The computation of these prediction models is presented and their precision discussed. The article then presents the OPRA web application, which performs both the computation of the room-acoustical measures and the prediction of RAQI perceptual factors for the three source signals speech, orchestra, and solo trumpet directly in the browser. Even though the prediction models do not yet deliver reliable results for all RAQI factors, OPRA enables an extensive acoustical analysis and a first assessment of the perceptual effect of performance spaces.}, language = {de} }
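OPRA derives its room-acoustical measures from impulse response measurements. A minimal broadband Python sketch of one such measure, the reverberation time T20 obtained via Schroeder backward integration (a production implementation, and presumably OPRA itself, would add octave-band filtering and noise compensation; the function name is illustrative):

    import numpy as np

    def t20_from_impulse_response(ir, fs):
        # Schroeder backward integration of the squared impulse
        # response yields the energy decay curve (EDC) in dB.
        edc = np.cumsum(ir[::-1] ** 2)[::-1]
        edc_db = 10 * np.log10(edc / edc[0])
        t = np.arange(len(ir)) / fs
        # Fit the decay between -5 and -25 dB and extrapolate to a
        # 60 dB decay, following the T20 convention.
        fit = (edc_db <= -5) & (edc_db >= -25)
        slope, _ = np.polyfit(t[fit], edc_db[fit], 1)
        return -60.0 / slope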