@misc{Struchholz2005,
  author = {Struchholz, Holger},
  title = {Interaktive Visualisierung und Abh{\"o}rung zur Klangabstrahlung von Musikinstrumenten - Interpolation und Filterung mehrkanaliger Aufnahmen unter Verwendung der geometrischen Relationen},
  address = {D{\"u}sseldorf},
  url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-1727},
  pages = {147},
  year = {2005},
  abstract = {The goal of this project is the development of a virtual environment that makes the characteristic sound radiation behavior of a musical instrument perceptible in real time. A virtual environment is created in which the user can move freely around a musical instrument and receives acoustic and visual feedback in real time. Combining auditory and visual elements with the possibility of interaction intensifies the experience and perception of the effects. The characteristic sound radiation behavior is not simulated by computationally expensive sound synthesis such as physical modeling; instead, the simulation is based on loudness interpolation of a multichannel recording. Using real recordings allows an almost faithful reproduction of the sound radiation behavior and, in contrast to computationally expensive sound synthesis methods, is feasible in real time. In addition, a simple filter was developed that simulates the characteristic sound radiation behavior of the instrument in a more qualitative way and can easily be implemented in real-time 3D applications. Both methods developed for simulating the sound radiation behavior were checked for functionality and validity by means of spectral analysis and a listening test.},
  language = {de}
}
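The loudness-interpolation technique summarized in Struchholz2005 lends itself to a short sketch. The following Python fragment is a minimal, hypothetical reading of the idea, assuming each channel of the multichannel recording corresponds to a microphone at a known azimuth around the instrument; the inverse-angle weighting and all names are illustrative assumptions, not the thesis' implementation.

# Hypothetical sketch of loudness interpolation over a multichannel
# recording, in the spirit of Struchholz2005 (not the thesis' code).
import numpy as np

def channel_gains(listener_az_deg, mic_az_deg, power=2.0):
    # Weight each microphone channel by angular proximity to the listener:
    # inverse-distance weighting on the circle, sharpened by `power`.
    diffs = np.abs((np.asarray(mic_az_deg, dtype=float)
                    - listener_az_deg + 180.0) % 360.0 - 180.0)
    on_mic = diffs < 1e-9
    if on_mic.any():                     # listener exactly on a mic direction
        gains = on_mic.astype(float)
    else:
        gains = 1.0 / diffs ** power
    return gains / gains.sum()           # normalize to keep the mix at unit level

def mix_to_mono(frames, listener_az_deg, mic_az_deg):
    # frames: (n_samples, n_channels) array -> mono signal for the listener.
    return frames.dot(channel_gains(listener_az_deg, mic_az_deg))

A real implementation would derive the gains from the full geometric relations (distance as well as angle) and smooth them over time to avoid zipper noise.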
@inproceedings{Herder1998,
  author = {Herder, Jens},
  title = {Sound Spatialization Framework: An Audio Toolkit for Virtual Environments},
  series = {First International Conference on Human and Computer, Aizu-Wakamatsu, September 1998},
  booktitle = {First International Conference on Human and Computer, Aizu-Wakamatsu, September 1998},
  address = {Aizu},
  url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-788},
  pages = {6},
  year = {1998},
  abstract = {The Sound Spatialization Framework is a C++ toolkit and development environment providing advanced sound spatialization for virtual reality and multimedia applications. It offers many powerful display and user-interface features not found in other sound spatialization software packages, and its facilities go beyond simple sound source spatialization: visualization and editing of the soundscape, multiple sinks, clustering of sound sources, monitoring and controlling resource management, support for various spatialization backends, and classes for MIDI animation and handling.},
  language = {en}
}

@inproceedings{Herder1997,
  author = {Herder, Jens},
  title = {Tools and widgets for spatial sound authoring},
  series = {CompuGraphics '97, Sixth International Conference on Computational Graphics and Visualization Techniques: Graphics in the Internet Age, Vilamoura, Portugal},
  booktitle = {CompuGraphics '97, Sixth International Conference on Computational Graphics and Visualization Techniques: Graphics in the Internet Age, Vilamoura, Portugal},
  address = {Portugal},
  isbn = {972-8342-02-0},
  url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-896},
  pages = {87--95},
  year = {1997},
  abstract = {Broader use of virtual reality environments and sophisticated animations spawn a need for spatial sound. Until now, spatial sound design has been based very much on experience and trial and error. Most effects are hand-crafted, because good design tools for spatial sound do not exist. This paper discusses spatial sound authoring and its applications, including shared virtual reality environments based on VRML. New utilities introduced by this research are an inspector for sound sources, an interactive resource manager, and a visual soundscape manipulator. The tools are part of a sound spatialization framework and allow a designer/author of multimedia content to monitor and debug sound events. Resource constraints such as limited sound spatialization channels can also be simulated.},
  language = {en}
}

@article{Herder1998a,
  author = {Herder, Jens},
  title = {Sound Spatialization Framework: An Audio Toolkit for Virtual Environments},
  series = {Journal of the 3D-Forum Society},
  volume = {12},
  journal = {Journal of the 3D-Forum Society},
  number = {3},
  pages = {17--22},
  year = {1998},
  abstract = {The Sound Spatialization Framework is a C++ toolkit and development environment providing advanced sound spatialization for virtual reality and multimedia applications. It offers many powerful display and user-interface features not found in other sound spatialization software packages, and its facilities go beyond simple sound source spatialization: visualization and editing of the soundscape, multiple sinks, clustering of sound sources, monitoring and controlling resource management, support for various spatialization backends, and classes for MIDI animation and handling.},
  keywords = {sound spatialization, resource management, virtual environments, spatial sound authoring, user interface design, human-machine interfaces},
  language = {en}
}
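The resource-management theme running through the three Herder entries above (limited spatialization channels, clustering of sources, an interactive resource manager) can be modeled compactly: each frame, the few available hardware channels are granted to the most salient sources. The following is a hypothetical Python illustration, not the framework's C++ API; the salience measure and all names are assumptions.

# Hypothetical channel allocator in the spirit of the Sound Spatialization
# Framework's resource management (not its actual API).
import heapq
from collections import namedtuple

Source = namedtuple("Source", "name distance gain")   # distance in meters

def allocate_channels(sources, n_channels):
    # Grant one of n_channels spatialization slots to the most salient
    # sources; here salience is simply gain attenuated by distance.
    return heapq.nlargest(n_channels, sources,
                          key=lambda s: s.gain / max(s.distance, 0.1))

sources = [Source("door", 8.0, 0.4), Source("voice", 1.5, 0.9),
           Source("radio", 3.0, 0.6), Source("fan", 2.0, 0.2)]
for s in allocate_channels(sources, 2):
    print("channel granted to", s.name)

Sources that lose their slot could be clustered or dropped, which is the kind of trade-off the 1997 paper's resource manager lets an author monitor and simulate.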
@inproceedings{HerderWilkeHeimbachetal.2009,
  author = {Herder, Jens and Wilke, Michael and Heimbach, Julia and G{\"o}bel, Sebastian and Marinos, Dionysios},
  title = {Simple Actor Tracking for Virtual TV Studios Using a Photonic Mixing Device},
  series = {12th International Conference on Human and Computer},
  booktitle = {12th International Conference on Human and Computer},
  address = {Hamamatsu / Aizu-Wakamatsu / D{\"u}sseldorf},
  year = {2009},
  abstract = {Virtual TV studios use actor tracking systems to resolve the occlusion between computer graphics and the studio camera image. The actor tracking delivers the distance between the actor and the studio camera. We deploy a photonic mixing device, which captures a depth map and a luminance image at low resolution. The renderer engine receives one depth value per actor via the OSC protocol. We describe the actor recognition algorithm, which is based on the luminance image, and the calculation of the depth value, and we discuss technical issues such as noise and calibration.},
  language = {en}
}

@inproceedings{GarbeHerbstHerder2007,
  author = {Garbe, Katharina and Herbst, Iris and Herder, Jens},
  title = {Spatial Audio for Augmented Reality},
  series = {10th International Conference on Human and Computer},
  booktitle = {10th International Conference on Human and Computer},
  address = {D{\"u}sseldorf, Aizu-Wakamatsu},
  pages = {53--58},
  year = {2007},
  abstract = {Using spatial audio successfully in augmented reality (AR) applications is a challenge, but it is rewarded with an improved user experience. We have therefore extended the AR/VR framework {\sc Morgan} with spatial audio to improve the user's orientation in an AR application. In this paper, we investigate the user's ability to localize and memorize spatial sounds (registered with virtual or real objects). We discuss two scenarios: in the first, the user only localizes sound sources; in the second, the user memorizes the location of audio-visual objects. Our results reflect spatial audio performance within the application domain and show which technology pitfalls still exist. Finally, we provide design recommendations for spatial audio AR environments.},
  language = {en}
}
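The tracking-to-renderer link described in HerderWilkeHeimbachetal.2009 (one depth value per actor, delivered over OSC) can be sketched in a few lines. The example below assumes the python-osc package; host, port, and the address pattern /actor/<id>/depth are invented for illustration and not taken from the paper.

# Hypothetical OSC sender for per-actor depth values (python-osc assumed).
from pythonosc.udp_client import SimpleUDPClient

client = SimpleUDPClient("127.0.0.1", 9000)    # renderer host and port (assumed)

def send_actor_depth(actor_id, depth_m):
    # Publish a single depth value (meters from the studio camera).
    client.send_message("/actor/%d/depth" % actor_id, float(depth_m))

send_actor_depth(1, 3.42)   # actor 1 stands 3.42 m from the camera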
@article{vonBergSteffensWeinzierletal.2021,
  author = {von Berg, Markus and Steffens, Jochen and Weinzierl, Stefan and M{\"u}llensiefen, Daniel},
  title = {Assessing room acoustic listening expertise},
  series = {Journal of the Acoustical Society of America},
  volume = {150},
  journal = {Journal of the Acoustical Society of America},
  number = {4},
  publisher = {Acoustical Society of America},
  doi = {10.1121/10.0006574},
  pages = {2539--2548},
  year = {2021},
  abstract = {Musicians and music professionals are often considered expert listeners for listening tests on room acoustics. However, these tests often target acoustic parameters other than those typically relevant in music, such as pitch, rhythm, amplitude, or timbre. To assess expertise in perceiving and understanding room acoustical phenomena, a listening test battery was constructed to measure perceptual sensitivity and cognitive abilities in the identification of rooms with different reverberation times and different spectral envelopes. Performance in these tests was related to data from the Goldsmiths Musical Sophistication Index, self-reported previous experience in music recording and acoustics, and academic knowledge of acoustics. The data from 102 participants show that sensory and cognitive abilities both correlate significantly with musical training, analytic listening skills, recording experience, and academic knowledge of acoustics, whereas general interest in and engagement with music do not show any significant correlations. Regression models using only the significantly correlated criteria of musicality and professional expertise explain only small to moderate amounts (11\%-28\%) of the variance in ``room acoustic listening expertise'' across the different tasks of the battery. Thus, the results suggest that the traditional criteria for selecting expert listeners in room acoustics are only weak predictors of their actual performance.},
  language = {en}
}

@article{LuizardSteffensWeinzierl2020,
  author = {Luizard, Paul and Steffens, Jochen and Weinzierl, Stefan},
  title = {Singing in different rooms: Common or individual adaptation patterns to the acoustic conditions?},
  series = {The Journal of the Acoustical Society of America},
  volume = {147},
  journal = {The Journal of the Acoustical Society of America},
  number = {2},
  publisher = {ASA},
  doi = {10.1121/10.0000715},
  year = {2020},
  language = {en}
}

@article{SteffensMuellerSchulzetal.2020,
  author = {Steffens, Jochen and M{\"u}ller, Franz and Schulz, Melanie and Gibson, Samuel},
  title = {The effect of inattention and cognitive load on unpleasantness judgments of environmental sounds},
  series = {Applied Acoustics},
  volume = {164},
  journal = {Applied Acoustics},
  publisher = {Elsevier},
  issn = {0003-682X},
  doi = {10.1016/j.apacoust.2020.107278},
  year = {2020},
  language = {en}
}

@techreport{Vogel2020,
  author = {Vogel, Peter},
  title = {Investieren unter Nebenbedingungen - Teil 4: Erg{\"a}nzung zu Teil 3},
  publisher = {Hochschule D{\"u}sseldorf},
  address = {D{\"u}sseldorf},
  organization = {Hochschule D{\"u}sseldorf},
  issn = {2567-2347},
  doi = {10.20385/2567-2347/2020.4},
  url = {http://nbn-resolving.de/urn:nbn:de:hbz:due62-opus-21206},
  year = {2020},
  abstract = {The fourth part of the ``Trading'' series presents a statistical evaluation of price data for a holding period of one trading day. The statistics comprise the gain factor, or return, and the degree of investment from part 1, as well as volatility, chance, risk, and trading costs, which were introduced in part 3. As in part 1, buy conditions are formed from the price ratio on two consecutive trading days. The evaluation of the market FEBRDUSA_1 of historical price data yields a consistent description of how the buy condition influences future price development. The largest gain is delivered by a mean-reversion strategy for which the chance clearly exceeds the risk. Since the statistics account for the limit on newly entered investments, the exaggerations inherent in arithmetic averaging are avoided. The statistical evaluation of a market constructed from random walks confirms that in this case there is no dependence on the buy condition.},
  language = {de}
}
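The evaluation described in Vogel2020 is straightforward to prototype: form a buy condition from the price ratio on two consecutive trading days, hold for one day, and collect the realized gain factors. The Python sketch below is a hypothetical reconstruction under these assumptions; thresholds, names, and the random-walk test market are illustrative, not the report's data or code.

# Hypothetical one-day-holding statistic in the spirit of Vogel2020.
import numpy as np

def next_day_gain_factors(closes, low, high):
    # Buy at day t's close when closes[t]/closes[t-1] lies in [low, high);
    # with a one-day holding period the realized gain factor is
    # closes[t+1]/closes[t].
    closes = np.asarray(closes, dtype=float)
    ratio = closes[1:] / closes[:-1]      # ratio[i] = closes[i+1]/closes[i]
    buy = (ratio[:-1] >= low) & (ratio[:-1] < high)
    return ratio[1:][buy]                 # next-day gain factors

rng = np.random.default_rng(0)
closes = 100.0 * np.cumprod(1.0 + 0.01 * rng.standard_normal(1000))  # random-walk market
gains = next_day_gain_factors(closes, 0.97, 0.99)  # mean reversion: buy after a drop
print(len(gains), gains.mean() if len(gains) else float("nan"))

On such a random-walk market the mean gain factor should hover around 1, which matches the no-dependence result the report confirms for its random-walk case.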