@inproceedings{Goetzelmann2017,
  author    = {G{\"o}tzelmann, Timo},
  title     = {A 3D Printable Hand Exoskeleton for the Haptic Exploration of Virtual 3D Scenes},
  booktitle = {PETRA '17: Proceedings of the 10th International Conference on PErvasive Technologies Related to Assistive Environments},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-5227-7},
  doi       = {10.1145/3056540.3064950},
  pages     = {63--66},
  year      = {2017},
  abstract  = {Virtual reality is currently experiencing a comeback. A considerable market has developed for VR computer games and educational applications. Some solutions integrate tracked devices which allow users to move freely within a certain space. Virtual 3D models can be explored visually, and integrated collision detection allows users to receive feedback, for instance by sound or vibration. In research, there are several approaches which provide actual feedback to the fingers of a hand when the user virtually touches the surface of a 3D model. However, no product currently sold on the consumer market offers this direct feedback for the whole hand. In this paper we introduce a low-cost hand exoskeleton which is usable in conjunction with commodity hardware. It covers each of the five fingers of the user's hand; its design is open source and low cost, and it can be customized and 3D printed by individuals. It aims at improving the haptic perception of users, is based on a popular physical computing platform, and is designed to be assembled even by users without electronics experience. We show the integration of the wireless exoskeleton's lean interface into an exemplary VR environment and describe a calibration process which is flexible enough for customizations.},
  language  = {en}
}

@inproceedings{GoetzelmannKargMueller2025,
  author    = {G{\"o}tzelmann, Timo and Karg, Pascal and M{\"u}ller, Mareike},
  title     = {A Novel Concept using Mirror Displays for Ambient User Notifications},
  publisher = {ACM},
  doi       = {10.1145/3733155.3733206},
  pages     = {315--323},
  year      = {2025},
  abstract  = {This paper presents a novel approach to notifying users about tasks in their immediate environment. Our approach is consciously designed to be low-threshold: by using augmented mirror displays, no devices such as AR glasses or smartphones have to be worn or held by the user. The mirror display allows people passing by to become aware, in an unobtrusive way, of technical and non-technical objects that need attention. These are conveyed visually in a uniform manner and can easily be observed by passing users. If the user decides to interact by slowing down or pausing, the display adapts to show more detailed information about the augmented objects. Users can decide whether this is relevant to them or not. We present a novel interaction concept, several use cases, and an implementation of our approach in a laboratory scenario, and we conduct a user study to evaluate the feasibility and effectiveness of our interaction concept.
The results reveal good performance of our system and, based on this, open up new questions and perspectives for future enhancements.},
  language  = {en}
}

@inproceedings{GoetzelmannBranzHeidenreichetal.2017,
  author    = {G{\"o}tzelmann, Timo and Branz, Lisa and Heidenreich, Claudia and Otto, Markus},
  title     = {A Personal Computer-based Approach for 3D-Printing Accessible to Blind People},
  booktitle = {PETRA '17: Proceedings of the 10th International Conference on PErvasive Technologies Related to Assistive Environments},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-5227-7},
  doi       = {10.1145/3056540.3064954},
  pages     = {4},
  year      = {2017},
  abstract  = {Tactile materials play a major role in making information available to blind people and support their understanding of spatial matters. Due to the complex manual manufacturing process, there is still a lack of suitable models for visually impaired people. Millions of 3D models are currently available on the internet and can be searched via dedicated retrieval sites. Most of them can be printed on 3D printers; however, this is often not a trivial task, even for sighted users. Blind people's self-dependence could be drastically increased if they were able to autonomously print 3D models at home. This paper analyses the individual tasks required to actually print 3D models and adapts them into steps accessible to blind people. We introduce a workflow for the combined use of 3D printing software and consumer hardware. We verified our approach in a formal user study with visually impaired people, which showed its feasibility.},
  language  = {en}
}

@inproceedings{WolfGoetzelmann2023,
  author    = {Wolf, Philipp and G{\"o}tzelmann, Timo},
  title     = {A Systematic Review of Interaction Approaches based on Visually Evoked Potentials},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3594806.3594862},
  pages     = {396--401},
  year      = {2023},
  abstract  = {Brain-computer interfaces (BCIs) are already present on the mass market. Visually evoked potentials (VEPs) are based on the recognition of recurrent visual stimuli by specific BCIs. A variety of approaches use this technology to implement a whole range of different application scenarios. This paper provides an overview of approaches that use VEPs either as a stand-alone technology or in combination with other technologies. First, terms for the different VEP technologies and their possible procedures are introduced. The papers are classified according to the technology used, the quality of the classification, the number of simultaneous stimuli, and different application areas. One focus of the paper is value input through VEPs, especially for approaches that do not work in combination with other devices.
It is shown that value input has received only little attention so far.},
  language  = {en}
}

@inproceedings{FeitlKreimeierGoetzelmann2022,
  author    = {Feitl, Selina and Kreimeier, Julian and G{\"o}tzelmann, Timo},
  title     = {Accessible Electrostatic Surface Haptics: Towards an Interactive Audiotactile Map Interface for People With Visual Impairments},
  booktitle = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3529190.3534781},
  pages     = {522--531},
  year      = {2022},
  abstract  = {Tactile models, such as floor plans of a familiar or unfamiliar environment, can help people with visual impairments grasp and interpret spatial information. Such plans are usually fabricated physically in a time-consuming process and are not interactive. This paper suggests presenting tactile floor plans using surface haptic feedback on an electrostatic display to overcome these limitations. Besides the audiohaptic exploration of tactile floor plans, our prototype also allows for voice interaction and demonstrates the control of smart home devices in this context. The evaluation was conducted in two stages with eight participants with visual impairments: First, we investigated how individual rooms can be identified and assigned using electrostatic tactile feedback in comparison with a common dot matrix display. Second, we evaluated the generation of a mental map when exploring an interactive, detailed floor plan with several rooms. Our results show that electrostatic haptic feedback enables people with visual impairments to recognize and understand graphic elements such as rooms and a floor plan. However, this entails significantly more time for exploration and a higher cognitive workload than a comparable but more expensive dot matrix display. On the other hand, spatial and semantic smart home context information can be added through a voice interface, for example "What is the name of the room I am touching?" or "Turn on the power socket in this room." Our preliminary but innovative approach reveals the initial potential of electrostatic feedback for accessibility and aims to situate the opportunities and challenges in this context for wider dissemination.},
  language  = {en}
}

@inproceedings{GoetzelmannKreimeierSchwabletal.2021,
  author    = {G{\"o}tzelmann, Timo and Kreimeier, Julian and Schwabl, Johannes and Karg, Pascal and Oumard, Christina and B{\"u}ttner, Florian},
  title     = {AmI-VR: An Accessible Building Information System as Case Study Towards the Applicability of Ambient Intelligence in Virtual Reality},
  booktitle = {Mensch und Computer 2021},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3473856.3474032},
  pages     = {597--600},
  year      = {2021},
  abstract  = {Ambient intelligence represents a paradigm in which the user does not react to the environment, but vice versa. Accordingly, smart environments can react to the presence and activities of users and support them unobtrusively from the background. Especially in the context of accessibility, this offers great potential that has so far only been demonstrated for individual user groups.
To overcome this limitation, we propose the automated, user- and context-dependent adaptation of both the modality and the locality of the presentation of building information, in the form of an adjustable table and two displays, on the basis of a prototype for a library information center. To remain independent of material and regulatory restrictions and to improve planability (especially during the ongoing COVID-19 pandemic), we complemented the hardware components with a Virtual Reality simulation, which proved to be very useful. Further optimization and evaluation will be needed for a more in-depth understanding and long-term dissemination; nevertheless, our prototype aims to foster further activities in the fields of ambient intelligence, accessibility, and virtual reality as a planning tool.},
  language  = {en}
}

@article{GoetzelmannKargMueller2025a,
  author    = {G{\"o}tzelmann, Timo and Karg, Pascal and M{\"u}ller, Mareike},
  title     = {Augminded: Ambient Mirror Display Notifications},
  journal   = {Multimodal Technologies and Interaction},
  volume    = {9},
  number    = {9},
  publisher = {MDPI},
  address   = {Basel, Switzerland},
  issn      = {2414-4088},
  doi       = {10.3390/mti9090093},
  pages     = {22},
  year      = {2025},
  abstract  = {This paper presents a new approach to providing contextual information in real-world environments. Our approach is consciously designed to be low-threshold: by using mirrors as augmented reality surfaces, no devices such as AR glasses or smartphones have to be worn or held by the user. It enables technical and non-technical objects in the environment to be visually highlighted, thus subtly drawing the attention of people passing by. The presented technology enables the provision of information that users can view in more detail, if required, by slowing down their movement. Users can decide whether this is relevant to them or not. A prototype system was implemented and evaluated through a user study. The results show a high level of acceptance and intuitive usability of the system, with participants being able to reliably perceive and process the information displayed. The technology thus offers promising potential for the unobtrusive and context-sensitive provision of information in various application areas. The paper discusses limitations of the system and outlines future research directions to further optimize the technology and extend its applicability.},
  language  = {en}
}

@article{Goetzelmann2018,
  author    = {G{\"o}tzelmann, Timo},
  title     = {Autonomous Selection and Printing of 3D Models for People Who Are Blind},
  journal   = {ACM Transactions on Accessible Computing (TACCESS)},
  volume    = {11},
  number    = {3},
  publisher = {ACM},
  doi       = {10.1145/3241066},
  url       = {http://nbn-resolving.de/urn:nbn:de:bvb:92-opus4-5587},
  pages     = {1--25},
  year      = {2018},
  abstract  = {3D models are an important means for understanding spatial contexts. Today these models can be materialized by 3D printing, which is increasingly used at schools for people with visual impairments. In contrast to sighted people, however, people with visual impairments have so far been able neither to search for nor to print 3D models without assistance. This article describes our work to develop an aid for people with visual impairments that facilitates autonomous searching for and printing of 3D models.
In our initial study, we determined the requirements for accomplishing this task by means of a questionnaire and developed a first approach that allowed personal computer-based 3D printing. An extended approach allowed searching and printing using common smartphones. In our architecture, the technical details of 3D printers are abstracted by a separate component that can be accessed via Wi-Fi, independently of the actual 3D printer used. It comprises a search of the models in an annotated database and 3D model retrieval from the internet. The whole process can be controlled by voice interaction. The feasibility of autonomous 3D printing for people with visual impairments is shown in a first user study. Our second user study examines the usability of the user interface when searching for 3D models on the internet and preparing them for materialization. The participants were able to define important printing settings, whereas other printing parameters could be determined algorithmically.},
  language  = {en}
}

@inproceedings{SchaeffPuglieseGoetzelmann2014,
  author    = {Sch{\"a}ff, Christian and Pugliese, Gaston and G{\"o}tzelmann, Timo},
  title     = {Behavior Based Web User Identification},
  publisher = {K{\"o}llen Druck+Verlag},
  address   = {Bonn},
  isbn      = {978-3-88579-447-9},
  issn      = {1614-3213},
  pages     = {201--204},
  year      = {2014},
  abstract  = {This paper examines different approaches for identifying users by their personal behavior and discusses techniques which could be used in the context of websites. Such web tracking approaches have the potential to identify users even if they use multiple or shared devices. On web pages, mouse and touch input are widely used. Therefore, we propose a survey to evaluate the feasibility of identifying users by their interaction behavior.},
  subject   = {Authentication},
  language  = {en}
}

@inproceedings{UllmannKreimeierGoetzelmannetal.2020,
  author    = {Ullmann, Daniela and Kreimeier, Julian and G{\"o}tzelmann, Timo and Kipke, Harald},
  title     = {BikeVR: a virtual reality bicycle simulator towards sustainable urban space and traffic planning},
  booktitle = {Proceedings of Mensch und Computer 2020},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  isbn      = {978-1-4503-7540-5},
  doi       = {10.1145/3404983.3410417},
  pages     = {511--514},
  year      = {2020},
  abstract  = {As awareness of the ongoing climate change grows, eco-friendly means of transport for all citizens are moving further into focus. In order to implement specific measures, it is necessary to better understand and emphasize sustainable modes of transportation such as walking and cycling through focused research. When developing novel traffic concepts and urban spaces for non-motorized traffic participants such as cyclists and pedestrians, traffic and urban planning must focus on their needs. To capture rarely considered qualitative factors in this context (such as stress, the perception of time, and the attractiveness of the environment), we present an audiovisual VR bicycle simulator which allows the user to cycle through a virtual urban environment by physically pedaling and steering. Virtual Reality (VR) is a suitable tool in this context, as study participants encounter identical and almost freely definable (virtual) urban spaces with adjustable traffic scenarios. Our preliminary prototype proved to be promising and will be further optimized and evaluated.},
  language  = {en}
}