@inproceedings{DeitschGoetzelmannGallwitz2014, author = {Deitsch, Sergiu and G{\"o}tzelmann, Timo and Gallwitz, Florian}, title = {Smartphone Input Using Its Integrated Projector and Built-In Camera}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-07226-5}, doi = {10.1007/978-3-319-07227-2_13}, pages = {124 -- 133}, year = {2014}, abstract = {Touch input on modern smartphones can be tedious, especially if the touchscreen is small. Smartphones with integrated projectors can be used to overcome this limitation by projecting the screen contents onto a surface, allowing the user to interact with the projection by means of simple hand gestures. In this work, we propose a novel approach for projector smartphones that allows the user to remotely interact with the smartphone screen via its projection. We detect the user's interaction using the built-in camera, and forward detected hand gestures as touch input events to the operating system. In order to avoid costly computations, we additionally use built-in motion sensors. We verify the proposed method using an implementation for the consumer smartphone Samsung Galaxy Beam equipped with a deflection mirror. © 2014 Springer International Publishing.}, subject = {App}, language = {en} } @inproceedings{KargStoehrJonasetal.2023, author = {Karg, Pascal and St{\"o}hr, Roman and Jonas, Lisa and Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {Reflect-AR: Insights into Mirror-Based Augmented Reality Instructions to Support Manual Assembly Tasks}, series = {Proceedings of the 16th International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 16th International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3594806.3594866}, pages = {62 -- 68}, year = {2023}, abstract = {Manual assembly tasks can be difficult and tedious without assistance. Here, augmented reality (AR) can help to ease the task load and lower the time and error rates by interactive in-situ instructions. Such approaches are often implemented by video or optical see-through AR. In order to test a system that is as low-threshold as possible and easy to implement, we developed a prototype with a mirror display supplemented by an RGB-D camera and evaluated it against a well-established AR HMD (HoloLens 2). For this purpose, in our study 10 participants placed 3D printed bricks via both technologies. The quantitative and qualitative analysis revealed that the fulfillment of this task with a professional, established AR HMD product still remains unmatched in terms of the time required, error rate, usability and task load due to the prototype status and the remaining technical shortcomings. However, the mirror display setup prototype met with interest from the participants as a novel, but unfamiliar and thus more difficult to use way of interaction. Furthermore, we report implementation challenges and advice. The empirical insights from our prototype and the first-time comparison to an established AR HMD aim to foster future work with half-silvered AR mirrors as a little-researched field with many different fields of application.}, language = {en} } @incollection{OumardKreimeierGoetzelmann2022, author = {Oumard, Christina and Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {Pardon?
An Overview of the Current State and Requirements of Voice User Interfaces for Blind and Visually Impaired Users}, series = {Lecture Notes in Computer Science}, booktitle = {Lecture Notes in Computer Science}, publisher = {Springer International Publishing}, address = {Cham}, isbn = {9783031086472}, issn = {0302-9743}, doi = {10.1007/978-3-031-08648-9_45}, pages = {388 -- 398}, year = {2022}, abstract = {People with special needs like blind and visually impaired (BVI) people can particularly benefit from using voice assistants providing spoken information input and output in everyday life. However, it is crucial to understand their needs and include them in developing accessible and useful assistance systems. By conducting an online survey with 146 BVI people, this paper revealed that common voice assistants like Apple's Siri or Amazon's Alexa are used by a majority of BVI people and are also considered helpful. In particular, features in audio entertainment, internet access, and everyday life practical things like weather queries, time-related information (e.g., setting an alarm clock), checking calendar entries, and taking notes are particularly often used and appreciated. The participants also indicated that the integration of smart home devices, the optimization of existing functionalities, and voice input are important. Still, potentially negative aspects such as data privacy and data security are also relevant. Therefore, it seems particularly interesting to implement offline data processing as far as possible. Our results contribute to this development by providing an overview of empirically collected requirements for functions and implementation-related aspects.}, language = {en} } @inproceedings{LyKargKreimeieretal.2022, author = {Ly, Kim and Karg, Pascal and Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {Development and Evaluation of a Low-cost Wheelchair Simulator for the Haptic Rendering of Virtual Road Conditions}, series = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3529190.3529195}, pages = {32 -- 39}, year = {2022}, abstract = {Many streets and buildings are not accessible for wheelchair users, which poses a major challenge to their mobility. Often, such challenges can be considered during the planning stage, which is why it is important to include this user group in the planning process. Ideally, these planning efforts should also be made visually and haptically navigable using Virtual Reality (VR) technology to allow for better imagination, accurate conclusions, and awareness of wheelchair users. This work optimizes previous approaches so that different roadway conditions can be made haptically perceptible. The proposed prototype provides a non-contact and adjustable brake for simulating inclined planes, which can be adjusted depending on personal and environmental parameters. In addition, it can simulate roads such as cobblestones using a tactile transducer. The individually optimized components were combined into a complete VR system and integrated into a virtual environment for evaluation. The qualitative and quantitative results showed that realistic simulation is possible, but further development steps towards holistic and dissemination-capable hardware and software are needed.
To this end, our contribution aims to improve the long-term involvement of wheelchair users in planning processes and increase awareness of their mobility situation.}, language = {en} } @inproceedings{KargKreimeierGoetzelmann2021, author = {Karg, Pascal and Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {Build-and-Touch: A Low-Cost, DIY, Open-Source Approach Towards Touchable Virtual Reality}, series = {Proceedings of the 14th PErvasive Technologies Related to Assistive Environments Conference}, booktitle = {Proceedings of the 14th PErvasive Technologies Related to Assistive Environments Conference}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3453892.3462217}, pages = {258 -- 259}, year = {2021}, abstract = {Virtual Reality (VR) is attracting more and more attention from academic research and practical application with the current availability of low-cost and end-user friendly devices. In terms of haptic (rather than visual) interaction, however, this technology is still in its infancy and there are few devices that are inexpensive and technologically simple to operate, respectively to procure. In this context, we present a concept of how haptic interaction with VR data gloves can also succeed by means of a commercially available web cam, sophisticated tracking software, and homemade low-cost hardware. All hardware and software components are to be obtained inexpensively or are open-source in order to achieve the greatest possible dissemination potential. With this work, we intend to provide an important trigger for future improvements and dissemination in terms of both technology and areas of application.}, language = {en} } @inproceedings{GoetzelmannKreimeierSchwabletal.2021, author = {G{\"o}tzelmann, Timo and Kreimeier, Julian and Schwabl, Johannes and Karg, Pascal and Oumard, Christina and B{\"u}ttner, Florian}, title = {AmI-VR: An Accessible Building Information System as Case Study Towards the Applicability of Ambient Intelligence in Virtual Reality}, series = {Mensch und Computer 2021}, booktitle = {Mensch und Computer 2021}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3473856.3474032}, pages = {597 -- 600}, year = {2021}, abstract = {Ambient intelligence represents a paradigm in which the user does not react to the environment, but vice versa. Accordingly, smart environments can react to the presence and activities of users and support them unobtrusively from the background. Especially in the context of accessibility, this offers great potential that has so far only been demonstrated for individual user groups. To overcome this limitation, we propose the automated, user- and context-related adaptation of the modality as well as locality of the representation of building information in the form of both an adjustable table as well as two displays on the basis of a prototype for a library information center. For being independent from material and regulatory restrictions and for better planability (especially with the ongoing COVID-19 pandemic) we used in addition to the hardware components also a Virtual Reality simulation, which proved to be very useful. 
Further optimization and evaluation will be needed for a more in depth understanding and dissemination in the long run, yet our prototype aims to help fostering further activities in the field of ambient intelligence, accessibility and virtual reality as a planning tool.}, language = {en} } @inproceedings{KreimeierUllmannKipkeetal.2020, author = {Kreimeier, Julian and Ullmann, Daniela and Kipke, Harald and G{\"o}tzelmann, Timo}, title = {Initial Evaluation of Different Types of Virtual Reality Locomotion Towards a Pedestrian Simulator for Urban and Transportation Planning}, series = {Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems}, booktitle = {Extended Abstracts of the 2020 CHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3334480.3382958}, pages = {1 -- 6}, year = {2020}, abstract = {The simulation of human behaviour in today's travel demand models is usually based on the assumption of a rational behaviour of its participants. Since travel demand models have been applied in particular for motorized traffic, only little is known about the influence of variables that affect both the choice of trip destination and the route decision in pedestrian and cycling models. In order to create urban spaces that encourage cycling and walking, we propose a VR (Virtual Reality) pedestrian simulator which involves walk-in-place locomotion. Thus, identical conditions are obtained for all subjects which is not feasible in real world field research with naturally varying environmental influences. As a first step, our qualitative and quantitative user study revealed that walking in a VR treadmill felt safest and most intuitive, although walking in it took in return more energy than walking-in-place with VR trackers only.}, language = {en} } @inproceedings{KreimeierKargGoetzelmann2020, author = {Kreimeier, Julian and Karg, Pascal and G{\"o}tzelmann, Timo}, title = {Tabletop virtual haptics}, series = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3389189.3389194}, pages = {1 -- 10}, year = {2020}, abstract = {When thinking of Virtual Reality (VR), most people think of stunning audio-visual environments in the context of entertainment. However, VR can also provide haptic information, e.g., to convey spatial information to blind and visually impaired people. In this context of accessibility they might be able to explore independently and self-determined tactile graphics, e.g., the structure of unknown real places prior to visiting them. Thus, we propose and evaluate tabletop virtual objects that can be felt by nowadays commercially available VR components, instead of exploring physical models (e.g., 3D printed maps) with the bare hand. These can be easily placed on an empty table, giving the blind user faster and more independent access to tactile information than with real physical representations. Our comprehensive pilot user study shows that it is possible to recognize floor plans and simple geometric shapes in this context. 
Also, the insights gained with regard to the suitability for practical application in this context point out the way to eased access to spatial (virtual) information using off-the-shelf components which can significantly support blind and visually impaired users' autonomy.}, language = {en} } @inproceedings{KreimeierKargGoetzelmann2020, author = {Kreimeier, Julian and Karg, Pascal and G{\"o}tzelmann, Timo}, title = {BlindWalkVR}, series = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3389189.3389193}, pages = {1 -- 8}, year = {2020}, abstract = {Virtual Reality (VR) promises expanded access to spatial information, especially for blind and visually impaired people. Through haptic and acoustic feedback, real world's limitations like the risk of injury or the necessity of a sighted safety assistant can be circumvented. However, the best possible profit of this technology requires interactive locomotion in large virtual environments to overcome real world space limitations. Thus, we present formative insights of blind people's egocentric VR locomotion by comparing four different implementations (i.e., two VR treadmills, trackers on the ankles or joystick based locomotion) in a qualitative and quantitative user study with seven blind and visually impaired participants. Our results reveal novel insights on characteristics of each implementation in terms of usability and practicability and also provide recommendations for further work in this field with the target user group in sight.}, language = {en} } @inproceedings{KreimeierKappeGoetzelmann2020, author = {Kreimeier, Julian and Kappe, Maximilian and G{\"o}tzelmann, Timo}, title = {BlindScanLine}, series = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3389189.3393742}, pages = {1 -- 4}, year = {2020}, abstract = {Sonification is a promising way for blind and visually impaired people to capture and process information purely auditory, e.g., by mapping distance metrics on sound characteristics. To optimally use the users' sensory bandwidth and cognitive load a sequential scanning at multiple azimuth angles instead of only one measuring point straight ahead could be a suitable option. Thus, we present a preliminary cross-platform implementation and evaluation of such a sequential 'line-scanning sonification' using off-the-shelf components while comparing frequency (FM) and amplitude modulation (AM). In our user study with blindfolded and visually impaired participants, users gained a more accurate mental model in significantly shorter time by means of FM compared to AM and the HoloLens' usability was rated better than our LIDAR prototype's. 
These initial findings show possibilities for further improvement, so that similar approaches could be used more and better in blind and visually impaired peoples' everyday life.}, language = {en} } @article{KreimeierGoetzelmann2020, author = {Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {Two Decades of Touchable and Walkable Virtual Reality for Blind and Visually Impaired People: A High-Level Taxonomy}, series = {Multimodal Technologies and Interaction}, volume = {4}, journal = {Multimodal Technologies and Interaction}, number = {4}, publisher = {MDPI AG}, issn = {2414-4088}, doi = {10.3390/mti4040079}, year = {2020}, abstract = {Although most readers associate the term virtual reality (VR) with visually appealing entertainment content, this technology also promises to be helpful to disadvantaged people like blind or visually impaired people. While overcoming physical objects' and spaces' limitations, virtual objects and environments that can be spatially explored have a particular benefit. To give readers a complete, clear and concise overview of current and past publications on touchable and walkable audio supplemented VR applications for blind and visually impaired users, this survey paper presents a high-level taxonomy to cluster the work done up to now from the perspective of technology, interaction and application. In this respect, we introduced a classification into small-, medium- and large-scale virtual environments to cluster and characterize related work. Our comprehensive table shows that especially grounded force feedback devices for haptic feedback ('small scale') were strongly researched in different applications scenarios and mainly from an exocentric perspective, but there are also increasingly physically ('medium scale') or avatar-walkable ('large scale') egocentric audio-haptic virtual environments. In this respect, novel and widespread interfaces such as smartphones or nowadays consumer grade VR components represent a promising potential for further improvements. Our survey paper provides a database on related work to foster the creation process of new ideas and approaches for both technical and methodological aspects.}, language = {en} } @inproceedings{GoetzelmannKreimeier2020, author = {G{\"o}tzelmann, Timo and Kreimeier, Julian}, title = {Towards the inclusion of wheelchair users in smart city planning through virtual reality simulation}, series = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3389189.3398008}, pages = {1 -- 7}, year = {2020}, abstract = {The planning of Smart Cities is a complex task. In particular, accessibility rules based on legal regulations but also on empirical values must be observed. However, it is difficult to determine in advance the exact needs of people with disabilities for concrete planning. Previous approaches mainly aimed at existing urban environments. Ideally, citizens should be directly involved in the planning process of buildings and urban environments. For existing urban environments, crowdsourcing approaches exist to obtain suggestions for improvement from citizens. We present a novel approach for the direct integration of wheelchair users in the urban environments to be planned (participatory urban development) in virtual reality. 
We present an easy-to-reproduce simulator that allows wheelchair users to directly explore the planned buildings and urban environments in a virtual, spatial environment. This means that these 3D models can be commented on already in the planning phase and provide valuable information about accessibility.}, language = {en} } @inproceedings{GoetzelmannKreimeier2020, author = {G{\"o}tzelmann, Timo and Kreimeier, Julian}, title = {Participation of elderly people in smart city planning by means of virtual reality}, series = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 13th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3389189.3397649}, pages = {1 -- 2}, year = {2020}, abstract = {Urbanisation is progressing, and the population in many countries has a growing proportion of elderly people. This must be taken into account when transforming urban environments into Smart Cities. On the one hand, general accessibility rules must be taken into account in the planning of buildings and urban environments. However, an essential requirement for transformation is the participation of citizens, i.e., concrete suggestions for improvement from citizens should be taken into account. Elderly people are a valuable source of information for improvements. Instead of simply involving them in the modification of existing facilities, our approach suggests that they should be included in the planning process. However, abstract plans and questionnaires allow only limited insights for ordinary citizens. Therefore, our approach aims at providing a suitable interface to interactively walk through and annotate virtual reality plans for buildings and city districts. We present a working prototype for elderly people based on consumer 3D hardware and an appropriate form of locomotion with which they can explore and annotate urban planning according to their individual needs.}, language = {en} } @inproceedings{GoetzelmannKreimeier2020, author = {G{\"o}tzelmann, Timo and Kreimeier, Julian}, title = {Optimization of navigation considerations of people with visual impairments through ambient intelligence}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3389189.3398009}, pages = {1 -- 6}, year = {2020}, abstract = {As urbanization progresses, cities are becoming increasingly complex. To make this complexity advantageous, Smart Cities integrate intelligent sensors that communicate with each other and are invisible to citizens, so that they can offer additional services. Despite the fact that the inclusion of diverse citizens is an essential requirement of Smart Cities, Ambient Intelligence is often not considered under accessibility aspects. The central question in this context is how disadvantaged groups in particular can benefit from Smart Cities. It is extremely important for people with disabilities to be able to move autonomously in public spaces. For this purpose, however, people who are blind sometimes need to be able to ask other people for directions. Knowing whether people are present can be important information.
In our exemplary case study, Ambient Intelligence is used for people with visual impairments, so that they can decide whether they want to go to this or an alternative place based on the information about the density of people in a place.}, language = {en} } @inproceedings{OumardKreimeierGoetzelmann2022, author = {Oumard, Christina and Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {Implementation and Evaluation of a Voice User Interface with Offline Speech Processing for People who are Blind or Visually Impaired}, series = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3529190.3529197}, pages = {277 -- 285}, year = {2022}, abstract = {Assistive technologies help blind and visually impaired people to manage their daily lives independently. However, they usually have to work with voice user interfaces to use smartphones and tablets. Tasks like managing the calendar, taking notes, or setting an alarm clock require reliable voice recognition, which entails online access for remote computing. However, apart from data privacy and security issues, an online connection is not available in any situation. In this regard, our paper develops and evaluates an offline voice user interface with offline speech processing. The voice assistant was tested with seven blind and visually impaired people. It was found that the assistant was very well received (i.e., pragmatic, hedonic, and general quality) and that no functional limitations could be perceived due to the offline data processing. Based on these findings, the scope of functionality and the level of detail of the evaluation can be extended further to adapt this technology for this specific user group and promote its dissemination.}, language = {en} } @article{Goetzelmann2018, author = {G{\"o}tzelmann, Timo}, title = {Visually Augmented Audio-Tactile Graphics for Visually Impaired People}, series = {ACM Transactions on Accessible Computing (TACCESS)}, volume = {2018}, journal = {ACM Transactions on Accessible Computing (TACCESS)}, number = {Volume 11, Issue 2, Article No. 8}, publisher = {ACM}, doi = {10.1145/3186894}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:92-opus4-5571}, pages = {31}, year = {2018}, abstract = {Tactile graphics play an essential role in knowledge transfer for blind people. The tactile exploration of these graphics is often challenging because of the cognitive load caused by physiological constraints and their complexity. The coupling of physical tactile graphics with electronic devices offers to support the tactile exploration by auditory feedback. Often, these systems have strict constraints regarding their mobility or the process of coupling both components. Additionally, visually impaired people cannot appropriately benefit from their residual vision. This article presents a concept for 3D printed tactile graphics, which offers to use audio-tactile graphics with usual smartphones or tablet-computers. By using capacitive markers, the coupling of the tactile graphics with the mobile device is simplified. These tactile graphics integrating these markers can be printed in one turn by off-the-shelf 3D printers without any post-processing and allows us to use multiple elevation levels for graphical elements. 
Based on the developed generic concept on visually augmented audio-tactile graphics, we presented a case study for maps. A prototypical implementation was tested by a user study with visually impaired people. All the participants were able to interact with the 3D printed tactile maps using a standard tablet computer. To study the effect of visual augmentation of graphical elements, we conducted another comprehensive user study. We tested multiple types of graphics and obtained evidence that visual augmentation may offer clear advantages for the exploration of tactile graphics. Even participants with a minor residual vision could solve the tasks with visual augmentation more quickly and accurately.}, language = {en} } @inproceedings{GoetzelmannSchneider2016, author = {G{\"o}tzelmann, Timo and Schneider, Daniel}, title = {CapCodes: Capacitive 3D Printable Identification and On-screen Tracking for Tangible Interaction}, series = {NordiCHI '16: Proceedings of the 9th Nordic Conference on Human-Computer Interaction}, volume = {2016}, booktitle = {NordiCHI '16: Proceedings of the 9th Nordic Conference on Human-Computer Interaction}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-4763-1}, doi = {10.1145/2971485.2971518}, pages = {4}, year = {2016}, abstract = {Electronic markers can be used to link physical representations and virtual content for tangible interaction, such as visual markers commonly used for tabletops. Another possibility is to leverage capacitive touch inputs of smartphones, tablets and notebooks. However, existing approaches either do not couple physical and virtual representations or require significant post-processing. This paper presents and evaluates a novel approach using a coding scheme for the automatic identification of tangibles by touch inputs when they are touched and shifted. The codes can be generated automatically and integrated into a great variety of existing 3D models from the internet. The resulting models can then be printed completely in one cycle by off-the-shelf 3D printers; post processing is not needed. Besides the identification, the object's position and orientation can be tracked by touch devices. Our evaluation examined multiple variables and showed that the CapCodes can be integrated into existing 3D models and the approach could also be applied to untouched use for larger tangibles.}, language = {en} } @inproceedings{GoetzelmannPavkovic2014, author = {G{\"o}tzelmann, Timo and Pavkovic, Aleksander}, title = {Towards Automatically Generated Tactile Detail Maps by 3D Printers for Blind Persons}, series = {Computers Helping People with Special Needs}, booktitle = {Computers Helping People with Special Needs}, publisher = {Springer}, isbn = {978-3-319-08599-9}, doi = {10.1007/978-3-319-08599-9_1}, pages = {1-7}, year = {2014}, abstract = {This paper introduces an approach for the (semi)automatic generation of worldwide available, detailed tactile maps including buildings and blind-specific features based on recognized illustrators' guidelines and standards. These guidelines for tactile maps are investigated in order to define a formal rule set and to automatically filter map data accordingly. Using the rule set, our approach automatically abstracts map data in order to generate a 2.1D tactile model providing multiple height levels (layers) which can be printed by usual consumer 3D printers. 
Based on the popular OpenStreetMap map data, our automated approach allows the generation of arbitrary detail maps that blind persons are individually interested in, without the need for manual adaptation of the tactile map. Thus, this approach contributes to the goal of increasing the autonomy of blind persons.}, subject = {3D-Drucker}, language = {en} } @inproceedings{SchaeffPuglieseGoetzelmann2014, author = {Sch{\"a}ff, Christian and Pugliese, Gaston and G{\"o}tzelmann, Timo}, title = {Behavior Based Web User Identification}, publisher = {K{\"o}llenDruck+Verlag}, address = {Bonn}, isbn = {978-3-88579-447-9}, issn = {1614-3213}, pages = {201 -- 204}, year = {2014}, abstract = {This paper examines different approaches for the identification of users by their personal behavior and discusses techniques which could be used in the context of websites. Such web tracking approaches have the potential to identify users even if they use multiple or shared devices. For web pages, mouse and touch input are widely used. Therefore, we propose a survey to evaluate the feasibility of identifying users by their interaction behavior.}, subject = {Authentifikation}, language = {en} } @inproceedings{GoetzelmannEichler2015, author = {G{\"o}tzelmann, Timo and Eichler, Laura}, title = {BlindWeb Maps - An Interactive Web Service for the Selection and Generation of Personalized Audio-Tactile Maps}, series = {Proc. 15th International Conference on Computers Helping People with Special Needs}, volume = {2015}, booktitle = {Proc. 15th International Conference on Computers Helping People with Special Needs}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-41266-5}, pages = {139 -- 145}, year = {2015}, abstract = {Tactile maps may contribute to the orientation of blind people or alternatively be used for navigation. In the past, the generation of these maps was a manual task which considerably limited their availability. Nowadays, similar to visual maps, tactile maps can also be generated semi-automatically by tools and web services. The existing approaches enable users to generate maps by entering a specific address or point of interest. This can in principle be done by a blind user. However, these approaches actually show an image of the map on the user's display which cannot be read by screen readers. Consequently, the blind user does not know what is on the map before it is printed. Ideally, the map selection process should give the user more information and freedom to select the desired excerpt. This paper introduces a novel web service for blind people to interactively select and automatically generate tactile maps. It adapts the interaction concept for map selection to the requirements of blind users whilst supporting multiple printing technologies. The integrated audio review of the map's contents allows earlier feedback to review whether the currently selected map extract corresponds to the desired information need. Changes can be initiated before the map is printed which, especially for 3D printing, saves much time. The user is able to select map features to be included in the tactile map. Furthermore, the map rendering can be adapted to different zoom levels and supports multiple printing technologies.
Finally, an evaluation with blind users was used to refine our approach.}, language = {en} } @inproceedings{GoetzelmannAlthaus2016, author = {G{\"o}tzelmann, Timo and Althaus, Christopher}, title = {TouchSurfaceModels: Capacitive Sensing Objects through 3D Printers}, series = {PETRA '16: Proceedings of the 9th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, volume = {2016}, booktitle = {PETRA '16: Proceedings of the 9th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-4337-4}, pages = {8}, year = {2016}, abstract = {Nowadays, 3D models can be downloaded from the internet and increasingly be printed by low cost 3D printers. In the future, blind people could benefit from this tendency. Unfortunately, many of these models are rather complex and not appropriate for the purely tactile exploration. To obtain quantitative data about how 3D printable models for blind people should be constructed, the tactile exploration can be recorded by video. However, the analysis of these videos is quite time consuming and expensive. Additionally, inaccuracies and masking effects may impede the use of this technique. In this paper we introduce a novel approach to automatically equip existing 3D models with a mesh of conductive wires which enable a touch sensitive surface for the printed 3D objects. These touch sensing 3D models can be printed in one turn by off-the-shelf 3D printers and used as an alternative to video recording. It allows exact registration of when and where the 3D object has been touched. In our multi-touch solution, particular attention has been paid to limit the number of necessary wires between 3D object and sensing electronics. Finally, our approach is evaluated by a feasibility study.}, language = {en} } @article{Goetzelmann2018, author = {G{\"o}tzelmann, Timo}, title = {Autonomous Selection and Printing of 3D Models for People Who Are Blind}, series = {ACM Transactions on Accessible Computing (TACCESS)}, volume = {11}, journal = {ACM Transactions on Accessible Computing (TACCESS)}, number = {3}, publisher = {ACM}, doi = {10.1145/3241066}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:92-opus4-5587}, pages = {1 -- 25}, year = {2018}, abstract = {3D models are an important means for understanding spatial contexts. Today these models can be materialized by 3D printing, which is increasingly used at schools for people with visual impairments. In contrast to sighted people, people with visual impairments have so far, however, neither been able to search nor to print 3D models without assistance. This article describes our work to develop an aid for people with visual impairments that would facilitate autonomous searching for and printing of 3D models. In our initial study, we determined the requirements to accomplish this task by means of a questionnaire and developed a first approach that allowed personal computer-based 3D printing. An extended approach allowed searching and printing using common smartphones. In our architecture, technical details of 3D printers are abstracted by a separate component that can be accessed via Wi-Fi independently of the actual 3D printer used. It comprises a search of the models in an annotated database and 3D model retrieval from the internet. The whole process can be controlled by voice interaction. 
The feasibility of autonomous 3D printing for people with visual impairments is shown with a first user study. Our second user study examines the usability of the user interface when searching for 3D models on the internet and preparing them for materialization. The participants were able to define important printing settings, whereas other printing parameters could be determined algorithmically.}, language = {en} } @inproceedings{GoetzelmannVazquez2015, author = {G{\"o}tzelmann, Timo and V{\'a}zquez, Pere-Pau}, title = {InclineType: An Accelerometer-based Typing Approach for Smartwatches}, series = {Proc. 16th International Conference on Human Computer Interaction}, volume = {2015}, booktitle = {Proc. 16th International Conference on Human Computer Interaction}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-3463-1}, doi = {10.1145/2829875.2829929}, year = {2015}, abstract = {Small mobile devices such as smartwatches are a rapidly growing market. However, they share the issue of limited input and output space which could impede the success of these devices in the future. Hence, suitable alternatives to the concepts and metaphors known from smartphones have to be found. In this paper we present InclineType, a tilt-based keyboard input that uses a 3-axis accelerometer for smartwatches. The user may directly select letters by moving his/her wrist and enters them by tapping on the touchscreen. Thanks to the distribution of the letters on the edges of the screen, the keyboard occupies only a small amount of space on the smartwatch. In order to optimize the user input, our concept proposes multiple techniques to stabilize the user interaction. Finally, a user study shows that users get familiar with this technique with almost no previous training, reaching speeds of about 6 wpm on average.}, language = {en} } @article{WolfGoetzelmann2023, author = {Wolf, Philipp and G{\"o}tzelmann, Timo}, title = {VEPdgets: Towards Richer Interaction Elements Based on Visually Evoked Potentials}, series = {Sensors}, volume = {23}, journal = {Sensors}, number = {22}, publisher = {MDPI AG}, issn = {1424-8220}, doi = {10.3390/s23229127}, year = {2023}, abstract = {For brain-computer interfaces, a variety of technologies and applications already exist. However, current approaches use visual evoked potentials (VEP) only as action triggers or in combination with other input technologies. This paper shows that the loss of visually evoked potentials after looking away from a stimulus is a reliable temporal parameter. The associated latency can be used to control time-varying variables using the VEP. In this context, we introduced VEP interaction elements (VEP widgets) for a value input of numbers, which can be applied in various ways and is purely based on VEP technology. We carried out a user study in a desktop as well as in a virtual reality setting. The results for both settings showed that the temporal control approach using latency correction could be applied to the input of values using the proposed VEP widgets. Even though value input is not very accurate under untrained conditions, users could input numerical values.
Our concept of applying latency correction to VEP widgets is not limited to the input of numbers.}, language = {en} } @inproceedings{Goetzelmann2017, author = {G{\"o}tzelmann, Timo}, title = {A 3D Printable Hand Exoskeleton for the Haptic Exploration of Virtual 3D Scenes}, series = {PETRA '17: Proceedings of the 10th International Conference on PErvasive Technologies Related to Assistive Environments}, volume = {2017}, booktitle = {PETRA '17: Proceedings of the 10th International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-5227-7}, doi = {10.1145/3056540.3064950}, pages = {63 -- 66}, year = {2017}, abstract = {Virtual reality is currently experiencing a comeback. A considerable market has developed for VR computer games and educational applications. Some solutions integrate tracked devices which allow users to freely move within a certain space. Virtual 3D models can be visually explored, and the implemented collision detection allows users to get feedback, for instance by sound or vibration. For research projects there are several approaches which offer actual feedback for the fingers of a hand when the user virtually touches the surface of a 3D model. However, in the consumer market currently no product is sold which offers this direct feedback for the whole hand. In this paper we introduce a low-cost hand exoskeleton which is usable in conjunction with commodity hardware. It covers each of the five fingers of the user's hand; its design is open-source, low-cost, and can be customized and 3D printed by individuals. It aims at improving the haptic perception of users, is based on a popular physical computing platform, and is designed to be assembled even by electronically inexperienced users. We show the integration of the wireless exoskeleton's lean interface into an exemplary VR environment and describe a calibration process which is flexible for customizations.}, language = {en} } @inproceedings{GoetzelmannWinkler2015, author = {G{\"o}tzelmann, Timo and Winkler, Klaus}, title = {SmartTactMaps: A Smartphone-Based Approach to Support Blind Persons in Exploring Tactile Maps}, series = {Proc. 8th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, volume = {2015}, booktitle = {Proc. 8th ACM International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-3452-5}, doi = {10.1145/2769493.2769497}, pages = {8}, year = {2015}, abstract = {Despite increasing digitalization of our society, many blind persons still have very limited access to predominantly pictorial information such as maps. In this paper we introduce a novel approach to improve the accessibility of maps for blind users by utilizing the abilities of standard smartphones. A major issue of tactile maps is the limited discriminability of the human tactile sense. Textual annotation of maps is crucial, but adds much complexity to tactile maps. Additionally, only a few Braille labels can be accommodated to maintain legibility. In our approach we link smartphones with adapted tactile maps, transforming the physical maps into interactive surfaces using both the tactile and the auditory modality. We integrate machine-readable metadata into these maps which can be recognized by the smartphones' camera to immediately obtain detailed map descriptions from a free global database.
During tactile exploration of the map, blind users can request auditory explanations by interacting with the mobile application. An experimental application and a user study demonstrate the feasibility of our approach.}, language = {en} } @inproceedings{GoetzelmannBranzHeidenreichetal.2017, author = {G{\"o}tzelmann, Timo and Branz, Lisa and Heidenreich, Claudia and Otto, Markus}, title = {A Personal Computer-based Approach for 3D-Printing Accessible to Blind People}, series = {PETRA '17: Proceedings of the 10th International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {PETRA '17: Proceedings of the 10th International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-5227-7}, doi = {10.1145/3056540.3064954}, pages = {4}, year = {2017}, abstract = {Tactile materials play a major role in making information available to blind people and support their understanding of spatial matters. Due to the complex manual manufacturing process, there is still a lack of suitable models for the visually impaired. Millions of 3D models are currently available on the internet and can be searched by dedicated retrieval sites. Most of them can be printed by 3D printers; however, this often isn't a trivial task even for sighted users. Blind people's self-dependence could be drastically increased if they were able to autonomously print 3D models at home. This paper analyses the individual tasks required to actually print 3D models and adapts them into steps accessible to blind people. We introduce a workflow for the combined use of 3D printing software and consumer hardware. We verified our approach by a formal user study with visually impaired people which showed its feasibility.}, language = {en} } @inproceedings{KreimeierGoetzelmann2019, author = {Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {First Steps Towards Walk-In-Place Locomotion and Haptic Feedback in Virtual Reality for Visually Impaired}, series = {CHI EA '19: Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}, volume = {2019}, booktitle = {CHI EA '19: Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-5971-9}, doi = {10.1145/3290607.3312944}, pages = {1 -- 6}, year = {2019}, abstract = {This paper presents the first results of a user study in which people with visual impairments (PVI) explored a virtual environment (VE) by walking in a virtual reality (VR) treadmill. As recently suggested, we have now acquired first results from our feasibility study investigating this walk-in-place interaction. This represents a new, more intuitive way of, for example, virtually exploring unknown spaces in advance. Our prototype consists of off-the-shelf VR components (i.e., treadmill, headphones, glasses, and controller) providing a simplified white cane simulation and was tested by six visually impaired subjects. Our results indicate that this interaction is still difficult, but promising and an important step to make VR more and better usable for PVIs.
As an impact on the CHI community, we would like to make this research field known to a wider audience by sharing our intermediate results and suggestions for improvements, some of which we are already working on.}, language = {en} } @inproceedings{Goetzelmann2016, author = {G{\"o}tzelmann, Timo}, title = {LucentMaps: 3D Printed Audiovisual Tactile Maps for Blind and Visually Impaired People}, series = {ASSETS '16: Proceedings of the 18th International ACM SIGACCESS Conference on Computers and Accessibility}, volume = {2016}, booktitle = {ASSETS '16: Proceedings of the 18th International ACM SIGACCESS Conference on Computers and Accessibility}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-4124-0}, doi = {10.1145/2982142.2982163}, pages = {81 -- 90}, year = {2016}, abstract = {Tactile maps support blind and visually impaired people in orientation and in familiarizing themselves with unfamiliar environments. Interactive approaches complement these maps with auditory feedback. However, commonly these approaches focus on blind people. We present an approach which incorporates visually impaired people by visually augmenting relevant parts of tactile maps. These audiovisual tactile maps can be used in conjunction with common tablet computers and smartphones. By integrating conductive elements into 3D printed tactile maps, they can be recognized by a single touch on the mobile device's display, which eases the handling for blind and visually impaired people. To allow multiple elevation levels in our transparent tactile maps, we conducted a study to reconcile technical and physiological requirements of off-the-shelf 3D printers, capacitive touch inputs and the human tactile sense. We propose an interaction concept for 3D printed audiovisual tactile maps, verify its feasibility and test it with a user study. Our discussion includes economic considerations crucial for a broad dissemination of tactile maps for both blind and visually impaired people.}, language = {en} } @inproceedings{Goetzelmann2014, author = {G{\"o}tzelmann, Timo}, title = {Interactive Tactile Maps for Blind People using Smartphones' Integrated Cameras}, series = {Proc. 9th ACM International Conference on Interactive Tabletops and Surfaces (ITS'14)}, volume = {2014}, booktitle = {Proc. 9th ACM International Conference on Interactive Tabletops and Surfaces (ITS'14)}, publisher = {ACM}, address = {New York, NY, USA}, isbn = {978-1-4503-2587-5}, doi = {10.1145/2669485.2669550}, pages = {381 -- 385}, year = {2014}, abstract = {Tactile maps may support blind persons in orientation and understanding geographical relations, but their availability is still very limited. However, recent technologies such as 3D printers allow individual tactile maps to be printed autonomously, which can then be linked with interactive applications. Besides geographical depictions, textual annotation of maps is crucial. However, this often adds much complexity to tactile maps. To limit tactile complexity, interactive approaches may help to complement maps with the auditory modality. The presented approach integrates barcodes into tactile maps to allow their detection by standard smartphones' cameras. Automatically, more detailed map data is obtained to auditively support the exploration of the tactile map.
Our experimental implementation shows the principal feasibility and provides the basis of ongoing comprehensive user studies.}, language = {en} } @inproceedings{WolfGoetzelmann2023, author = {Wolf, Philipp and G{\"o}tzelmann, Timo}, title = {A Systematic Review of Interaction Approaches based on Visually Evoked Potentials}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3594806.3594862}, pages = {396 -- 401}, year = {2023}, abstract = {BCIs are already present in the mass market. Visual evoked potentials (VEP) are based on the recognition of recurrent visual stimuli by specific BCI. A variety of approaches use this technology to implement a whole range of different application scenarios. This paper provides an overview of approaches that use VEP either as a stand-alone technology or in combination with other technologies. First, terms for different VEP technologies and their possible procedures are introduced. The papers are classified according to the technology used, the quality of the classification, the number of simultaneous stimuli, and different application areas. One focus of the paper is value input through VEP, especially for approaches that do not work in combination with other devices. It is shown that the value input has been addressed only very little so far.}, language = {en} } @inproceedings{Goetzelmann2015, author = {G{\"o}tzelmann, Timo}, title = {CapMaps: Capacitive Sensing 3D Printed Audio-Tactile Maps}, series = {Proc. 15th International Conference on Computers Helping People with Special Needs}, volume = {2015}, booktitle = {Proc. 15th International Conference on Computers Helping People with Special Needs}, publisher = {Springer}, address = {Cham}, isbn = {978-3-319-41266-5}, doi = {10.1007/978-3-319-41267-2_20}, pages = {146 -- 152}, year = {2015}, abstract = {Tactile maps can be useful tools for blind people for navigation and orientation tasks. Apart from static maps, there are techniques to augment tactile maps with audio content. They can be used to interact with the map content, to offer extra information and to reduce the tactile complexity of a map. Studies show that audio-tactile maps can be more efficient and satisfying for the user than pure tactile maps without audio feedback. A major challenge of audio-tactile maps is the linkage of tactile elements with audio content and interactivity. This paper introduces a novel approach to link 3D printed tactile maps with mobile devices, such as smartphones and tablets, in a flexible way to enable interactivity and audio-support. By integrating conductive filaments into the printed maps it seamlessly integrates into the 3D printing process. This allows to automatically recognize the tactile map by a single press at its corner. Additionally, the arrangement of the tactile map on the mobile device is flexible and detected automatically which eases the use of these maps. 
The practicability of this approach is shown by a dedicated feasibility study.}, language = {en} } @inproceedings{FeitlKreimeierGoetzelmann2022, author = {Feitl, Selina and Kreimeier, Julian and G{\"o}tzelmann, Timo}, title = {Accessible Electrostatic Surface Haptics: Towards an Interactive Audiotactile Map Interface for People With Visual Impairments}, series = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments}, booktitle = {Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive Environments}, publisher = {ACM}, address = {New York, NY, USA}, doi = {10.1145/3529190.3534781}, pages = {522 -- 531}, year = {2022}, abstract = {Tactile models, such as floor plans of a familiar or unfamiliar environment, can be helpful for people with visual impairments to grasp and interpret spatial information. Such plans are usually fabricated physically in a time-consuming process and are not interactive. This paper suggests presenting tactile floor plans using surface haptic feedback on an electrostatic display to overcome these limitations. Besides audiohaptically exploring tactile floor plans, our prototype also allows for voice interaction and demonstrates the control of smart home devices in this context. The evaluation was conducted in two stages with eight participants with visual impairments: First, it was investigated how individual rooms can be identified and assigned with electrostatic tactile feedback over a common dot matrix display. Second, the generation of a mental map when exploring an interactive detailed floor plan with several rooms was evaluated. Our results show that electrostatic haptic feedback enables people with visual impairments to recognize and understand graphic elements like rooms and a floor plan. However, this entails significantly more time for exploration and a higher cognitive workload when compared to a comparable but more expensive dot matrix display, though spatial and semantic smart home contextual information can be added through a voice interface, such as "What is the room's name I am touching?" or "Turn on the power socket in this room." Our preliminary but innovative approach reveals the initial potential of electrostatic feedback for accessibility and aims to help situate the opportunities and challenges in this context for wider dissemination.}, language = {en} } @inproceedings{GoetzelmannKargMueller2025, author = {G{\"o}tzelmann, Timo and Karg, Pascal and M{\"u}ller, Mareike}, title = {A Novel Concept using Mirror Displays for Ambient User Notifications}, publisher = {ACM}, doi = {10.1145/3733155.3733206}, pages = {315 -- 323}, year = {2025}, abstract = {This paper presents a novel approach to notify users about tasks in their immediate environment. Our approach is consciously designed to be low-threshold: by using augmented mirror displays, no devices such as AR glasses or smartphones have to be worn or held by the user. The mirror display allows people passing by to become aware of technical and non-technical objects that need attention in an unobtrusive way. These are conveyed visually in a uniform manner and can be easily observed by passing users. If the user decides to interact by slowing down or pausing, the display adapts to more detailed information about the augmented objects. Users can decide whether this is relevant to them or not.
We present a novel interaction concept, several use cases, and an implementation of our approach in a laboratory scenario, and conduct a user study to evaluate the feasibility and effectiveness of our interaction concept. The results reveal a good performance of our system and, based on this, open up new questions and perspectives for future enhancements.}, language = {en} } @article{GoetzelmannKargMueller2025, author = {G{\"o}tzelmann, Timo and Karg, Pascal and M{\"u}ller, Mareike}, title = {Augminded: Ambient Mirror Display Notifications}, series = {Multimodal Technologies and Interaction}, volume = {9}, journal = {Multimodal Technologies and Interaction}, number = {9}, publisher = {MDPI}, address = {Basel, Switzerland}, issn = {2414-4088}, doi = {10.3390/mti9090093}, pages = {22}, year = {2025}, abstract = {This paper presents a new approach for providing contextual information in real-world environments. Our approach is consciously designed to be low-threshold; by using mirrors as augmented reality surfaces, no devices such as AR glasses or smartphones have to be worn or held by the user. It enables technical and non-technical objects in the environment to be visually highlighted and thus subtly draw the attention of people passing by. The presented technology enables the provision of information that can be viewed in more detail by the user if required by slowing down their movement. Users can decide whether this is relevant to them or not. A prototype system was implemented and evaluated through a user study. The results show a high level of acceptance and intuitive usability of the system, with participants being able to reliably perceive and process the information displayed. The technology thus offers promising potential for the unobtrusive and context-sensitive provision of information in various application areas. The paper discusses limitations of the system and outlines future research directions to further optimize the technology and extend its applicability.}, language = {en} }