@inproceedings{MartensHerder1999,
  author    = {Martens, William L. and Herder, Jens},
  title     = {Perceptual criteria for eliminating reflectors and occluders from the rendering of environmental sound},
  series    = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association},
  booktitle = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association},
  publisher = {Acoustical Society of America, European Acoustics Association},
  address   = {Berlin},
  year      = {1999},
  abstract  = {Given limited computational resources available for the rendering of spatial sound imagery, we seek to determine effective means for choosing what components of the rendering will provide the most audible differences in the results. Rather than begin with an analytic approach that attempts to predict audible differences on the basis of objective parameters, we chose to begin with subjective tests of how audibly different the rendering result may be heard to be when that result includes two types of sound obstruction: reflectors and occluders. Single-channel recordings of 90 short speech sounds were made in an anechoic chamber in the presence and absence of these two types of obstructions, and as the angle of those obstructions varied over a 90 degree range. These recordings were reproduced over a single loudspeaker in that anechoic chamber, and listeners were asked to rate how confident they were that the recording of each of these 90 stimuli included an obstruction. These confidence ratings can be used as an integral component in the evaluation function used to determine which reflectors and occluders are most important for rendering.},
  language  = {en}
}

@inproceedings{MartensHerderShiba1999,
  author    = {Martens, William L. and Herder, Jens and Shiba, Yoshiki},
  title     = {A filtering model for efficient rendering of the spatial image of an occluded virtual sound source},
  series    = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association},
  booktitle = {137th Regular Meeting of the Acoustical Society of America and the 2nd Convention of the European Acoustics Association},
  publisher = {Acoustical Society of America, European Acoustics Association},
  address   = {Berlin},
  year      = {1999},
  abstract  = {Rendering realistic spatial sound imagery for complex virtual environments must take into account the effects of obstructions such as reflectors and occluders. It is relatively well understood how to calculate the acoustical consequence that would be observed at a given observation point when an acoustically opaque object occludes a sound source. But the interference patterns generated by occluders of various geometries and orientations relative to the virtual source and receiver are computationally intense if accurate results are required. In many applications, however, it is sufficient to create a spatial image that is recognizable by the human listener as the sound of an occluded source. In the interest of improving audio rendering efficiency, a simplified filtering model was developed and its audio output submitted to psychophysical evaluation. Two perceptually salient components of occluder acoustics were identified that could be directly related to the geometry and orientation of a simple occluder. Actual occluder impulse responses measured in an anechoic chamber resembled the responses of a model incorporating only a variable-duration delay line and a low-pass filter with variable cutoff frequency.},
  language  = {en}
}

@phdthesis{Herder1999,
  author   = {Herder, Jens},
  title    = {A Sound Spatialization Resource Management Framework},
  school   = {University of Tsukuba},
  address  = {Tsukuba},
  year     = {1999},
  abstract = {In a virtual reality environment, users are immersed in a scene with objects which might produce sound. The responsibility of a VR environment is to present these objects, but a practical system has only limited resources, including spatialization channels (mixels), MIDI/audio channels, and processing power. A sound spatialization resource manager, introduced in this thesis, controls sound resources and optimizes fidelity (presence) under given conditions, using a priority scheme based on psychoacoustics. Objects which are spatially close together can be coalesced by a novel clustering algorithm, which considers listener localization errors. Application programmers and VR scene designers are freed from the burden of assigning mixels and predicting sound source locations. The framework includes an abstract interface for sound spatialization backends, an API for the VR environments, and multimedia authoring tools.},
  language = {en}
}

@article{Herder1999a,
  author   = {Herder, Jens},
  title    = {Visualization of a Clustering Algorithm of Sound Sources based on Localization Errors},
  journal  = {Journal of the 3D-Forum Society},
  volume   = {13},
  number   = {3},
  pages    = {66--70},
  year     = {1999},
  abstract = {A module for soundscape monitoring and for visualizing resource management processes was extended to present clusters generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources into a single representative source, taking into account localization errors that depend on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done at runtime and allows understanding and evaluation of the clustering algorithm.},
  language = {en}
}

@inproceedings{Herder1999b,
  author    = {Herder, Jens},
  title     = {Visualization of a Clustering Algorithm of Sound Sources based on Localization Errors},
  series    = {Second International Conference on Human and Computer},
  booktitle = {Second International Conference on Human and Computer},
  address   = {Aizu-Wakamatsu},
  pages     = {1--5},
  year      = {1999},
  abstract  = {A module for soundscape monitoring and for visualizing resource management processes was extended to present clusters generated by a novel sound source clustering algorithm. This algorithm groups multiple sound sources into a single representative source, taking into account localization errors that depend on listener orientation. Localization errors are visualized for each cluster using resolution cones. Visualization is done at runtime and allows understanding and evaluation of the clustering algorithm.},
  language  = {en}
}
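As a rough illustration of the simplified occlusion model summarized in the MartensHerderShiba1999 abstract above (a variable-duration delay line followed by a low-pass filter with variable cutoff frequency), the following Python sketch applies both stages to a mono signal. The first-order filter choice, the parameter values, and all function names are assumptions made for illustration; the paper does not prescribe them.

# Minimal sketch of the simplified occlusion filtering model described in the
# MartensHerderShiba1999 abstract: a variable-duration delay line followed by a
# low-pass filter with variable cutoff frequency. Filter order, parameter values,
# and names are illustrative assumptions, not taken from the paper.
import numpy as np

def occlusion_filter(signal, fs, delay_s, cutoff_hz):
    """Apply a pure delay and a first-order low-pass to a mono signal."""
    # Variable-duration delay line (rounded to whole samples for simplicity).
    delay_samples = int(round(delay_s * fs))
    delayed = np.concatenate([np.zeros(delay_samples), signal])

    # One-pole low-pass filter with variable cutoff frequency.
    alpha = 1.0 - np.exp(-2.0 * np.pi * cutoff_hz / fs)
    out = np.zeros_like(delayed)
    state = 0.0
    for i, x in enumerate(delayed):
        state += alpha * (x - state)
        out[i] = state
    return out

# Example: a hypothetical occluder adding 2 ms of delay and a 2 kHz cutoff.
fs = 48000
source = np.random.randn(fs)          # stand-in for a dry speech recording
occluded = occlusion_filter(source, fs, delay_s=0.002, cutoff_hz=2000.0)

In a renderer, delay_s and cutoff_hz would be driven by the occluder's geometry and orientation relative to the virtual source and listener, which is the mapping the paper evaluates psychophysically.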
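The clustering idea running through the Herder1999 thesis and the two visualization papers (coalescing sources whose angular separation, as seen from the listener, lies within the localization error) could be sketched as follows. The greedy grouping strategy, the fixed 10-degree threshold, and all names are illustrative assumptions; the actual algorithm additionally accounts for listener orientation and direction-dependent localization errors.

# Illustrative sketch of clustering sound sources whose angular separation, as
# seen from the listener, falls below an assumed localization-error threshold.
# The greedy strategy, the fixed threshold, and all names are assumptions for
# illustration, not the algorithm from the thesis.
import numpy as np

def angular_distance(a, b, listener):
    """Angle in degrees between two source positions as seen from the listener."""
    va, vb = np.asarray(a) - listener, np.asarray(b) - listener
    cos = np.dot(va, vb) / (np.linalg.norm(va) * np.linalg.norm(vb))
    return np.degrees(np.arccos(np.clip(cos, -1.0, 1.0)))

def cluster_sources(positions, listener, error_deg=10.0):
    """Greedily group sources lying within error_deg of a cluster's first member."""
    listener = np.asarray(listener, dtype=float)
    clusters = []
    for pos in positions:
        for cluster in clusters:
            if angular_distance(pos, cluster[0], listener) <= error_deg:
                cluster.append(pos)
                break
        else:
            clusters.append([pos])
    # One representative position (centroid) per cluster.
    return [np.mean(cluster, axis=0) for cluster in clusters]

sources = [(1.0, 0.0, 0.0), (1.0, 0.1, 0.0), (0.0, 1.0, 0.0)]
print(cluster_sources(sources, listener=(0.0, 0.0, 0.0)))

Each returned representative position could then be assigned a single spatialization channel (mixel), which is the resource-saving step the thesis describes.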