@inproceedings{FettkeKastlOestreichetal.2023,
  author    = {Fettke, Ulrike and Kastl, Andrea and Oestreich, Claudia and B{\"o}hm, Janna and Rauner, Yvonne and Ittlinger, Sabine},
  title     = {Live independently at home for as long as possible. Digital assistance systems as a decisive factor},
  series    = {Transformative Change in the Contested Fields of Housing and Care. Conference book},
  booktitle = {Transformative Change in the Contested Fields of Housing and Care. Conference book},
  doi       = {10.13140/RG.2.2.35834.09923},
  year      = {2023},
  language  = {en}
}

@inproceedings{FettkeKastlIttlinger2024,
  author    = {Fettke, Ulrike and Kastl, Andrea and Ittlinger, Sabine},
  title     = {Independent Housing in your own home. Understanding what people want after a clinical rehabilitation stay},
  series    = {Imaginaries and Strategies for Good Care and Good Housing in Times of Transformation. Wien, 23. \& 24. Mai 2024},
  booktitle = {Imaginaries and Strategies for Good Care and Good Housing in Times of Transformation. Wien, 23. \& 24. Mai 2024},
  year      = {2024},
  language  = {en}
}

@inproceedings{HorstmannshoffSollfrankMueller2021,
  author    = {Horstmannshoff, Caren and Sollfrank, Tobias and M{\"u}ller, Martin},
  title     = {Intuitive interaction with cooperative assistance robots for the 3rd and 4th age: the KoBo34 project. How did we aim to actively involve all potential stakeholders?},
  series    = {International Research Week, Technical University of Applied Sciences Rosenheim},
  booktitle = {International Research Week, Technical University of Applied Sciences Rosenheim},
  year      = {2021},
  language  = {en}
}

@inproceedings{FettkeGuennel2023,
  author    = {Fettke, Ulrike and G{\"u}nnel, Sebastian},
  title     = {Live independently at home for as long as possible! (Digital) assistance systems as a decisive factor},
  series    = {Conference Housing and Care, Linz},
  booktitle = {Conference Housing and Care, Linz},
  year      = {2023},
  language  = {en}
}

@inproceedings{HeitzmannWohlschlaegerLeiteretal.2024,
  author    = {Heitzmann, Sebastian and Wohlschl{\"a}ger, Maximilian and Leiter, Nina and L{\"o}der, Martin G. J. and Versen, Martin and Laforsch, Christian},
  title     = {Classification of Foods and Plastics using FD-FLIM and Neural Networks},
  series    = {2024 IEEE Sensors Applications Symposium (SAS)},
  booktitle = {2024 IEEE Sensors Applications Symposium (SAS)},
  publisher = {IEEE},
  doi       = {10.1109/SAS60918.2024.10636453},
  pages     = {1--6},
  year      = {2024},
  abstract  = {Plastics and foods can be differentiated by their material-characteristic fluorescence properties, especially their fluorescence lifetimes. An areal measurement of the fluorescence lifetimes of these materials can be performed using Frequency-Domain Fluorescence Lifetime Imaging Microscopy (FD-FLIM). Up until now, most plastic detection has been done using NIR or X-ray methods, while most applications of FD-FLIM are in biomedical fields. The application of FD-FLIM in a food safety setting presents a promising approach to the detection of plastic contaminants. A Multilayer Perceptron (MLP) based neural network is developed to reliably identify the presence of plastic in a food/plastic sample via FD-FLIM. Features such as the mean, median, standard deviation, variance, range, and interquartile range are calculated from the intensity image, the phase shift, and the modulation index, along with the corresponding phase- and modulation-dependent fluorescence lifetimes from the FD-FLIM data. For training, testing, and validation, a total of 3520 FD-FLIM measurements were taken at 445 nm excitation of sixteen samples with the labels food and plastic. To rank the performance of the 3888 trained networks, F1-score, accuracy, precision, and recall are used as metrics. The best-performing network reaches an F1-score of 98.86\%, proving that a differentiation of foods and plastics using MLP classification based on FD-FLIM data is possible with a low error rate.},
  language  = {en}
}
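The Heitzmann et al. abstract above names a concrete pipeline: per-channel summary statistics from the FD-FLIM data, fed to an MLP that separates food from plastic. Below is a minimal Python sketch of that idea, not the authors' code: the feature list and the classifier type come from the abstract, while all function names, the network size, and the synthetic data are illustrative assumptions (sklearn's MLPClassifier stands in for whatever MLP implementation the authors used).

# Minimal sketch (not the authors' code) of feature-based MLP classification
# on FD-FLIM data. Feature list and classifier type follow the abstract;
# everything else (names, shapes, network size, data) is illustrative.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

def extract_features(channel):
    # Per-channel statistics named in the abstract: mean, median, standard
    # deviation, variance, range, interquartile range.
    q75, q25 = np.percentile(channel, [75, 25])
    return [np.mean(channel), np.median(channel), np.std(channel),
            np.var(channel), np.ptp(channel), q75 - q25]

def measurement_to_vector(channels):
    # channels: per-measurement images (intensity, phase shift, modulation
    # index, phase- and modulation-dependent lifetimes).
    return np.concatenate([extract_features(c) for c in channels])

# Synthetic stand-in data: 200 fake "measurements" of five 64x64 channels.
rng = np.random.default_rng(0)
y = rng.integers(0, 2, size=200)  # 0 = food, 1 = plastic
X = np.stack([measurement_to_vector([rng.normal(loc=label, size=(64, 64))
                                     for _ in range(5)])
              for label in y])

X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0)
clf = MLPClassifier(hidden_layer_sizes=(32, 16), max_iter=1000,
                    random_state=0).fit(X_tr, y_tr)
print(f"held-out accuracy: {clf.score(X_te, y_te):.3f}")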
@inproceedings{LeiterSchwarzVersenetal.2024,
  author    = {Leiter, Nina and Schwarz, Jonas and Versen, Martin and Risse, Michael and L{\"o}der, Martin G. J. and Laforsch, Christian},
  title     = {A non-destructive approach to wood origin differentiation using FD-FLIM},
  series    = {2024 IEEE Sensors Applications Symposium (SAS)},
  booktitle = {2024 IEEE Sensors Applications Symposium (SAS)},
  publisher = {IEEE},
  doi       = {10.1109/SAS60918.2024.10636550},
  pages     = {1--6},
  year      = {2024},
  abstract  = {Wood auto-fluorescence, primarily attributed to lignin, presents a distinctive feature. Different wood species exhibit variations in lignin distribution. Frequency-Domain Fluorescence Lifetime Imaging Microscopy (FD-FLIM) is effective in distinguishing wood species based on their fluorescence characteristics. This study investigates the potential to differentiate the origins of beech, spruce, and larch through phase-dependent fluorescence decay times. Therefore, the null hypothesis H0 is tested: the phase-dependent fluorescence lifetimes of samples from the same species but varying in origin are equal. To determine the fluorescence characteristics of woods of different origins, wood samples of the species Fagus sylvatica L. (beech), Larix decidua Mill. (larch), and Picea abies (L.) H. Karst. (spruce) from Germany, Austria, the Netherlands, Spain, Sweden, New Zealand, and Romania were analyzed with an FD-FLIM camera setup, including a laser source emitting at an excitation wavelength of 445 nm. Analysis-of-Variance hypothesis testing on the fluorescence lifetime data for each wood species indicates that 23 out of 35 origin pairs could be distinguished at a 5\% significance level. While acknowledging the challenges of origin-based differentiation, the findings emphasize the promising potential of fluorescence lifetime imaging microscopy as a valuable tool in this context. Moving forward, a more intricate approach to sample differentiation should involve acquiring detailed information about the samples, including associated temperature and precipitation profiles and soil composition.},
  language  = {en}
}
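The Leiter et al. entry above rests on one statistical step: one-way ANOVA on phase-dependent lifetimes, applied pairwise over origins at p < 0.05. The following is a minimal sketch of that test; the origins, means, and sample sizes are synthetic stand-ins, not the paper's data.

# Illustrative one-way ANOVA on phase-dependent fluorescence lifetimes,
# mirroring the hypothesis test in the abstract. The per-origin samples
# below are synthetic stand-ins, not measured data.
from itertools import combinations
import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
lifetimes_by_origin = {  # hypothetical lifetime samples (ns) for one species
    "Germany": rng.normal(2.10, 0.15, 300),
    "Austria": rng.normal(2.14, 0.15, 300),
    "Sweden": rng.normal(2.25, 0.15, 300),
}

# Global test of H0: equal mean lifetimes across all origins.
f_stat, p_value = stats.f_oneway(*lifetimes_by_origin.values())
print(f"ANOVA: F = {f_stat:.2f}, p = {p_value:.3g}")

# Pairwise tests over origin pairs, as in the paper's 35-pair comparison.
for a, b in combinations(lifetimes_by_origin, 2):
    _, p = stats.f_oneway(lifetimes_by_origin[a], lifetimes_by_origin[b])
    verdict = "distinguishable" if p < 0.05 else "not distinguishable"
    print(f"{a} vs {b}: {verdict} (p = {p:.3g})")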
@inproceedings{FischerMichalkBogenberger2025,
  author    = {Fischer, Markus and Michalk, Wibke and Bogenberger, Klaus},
  title     = {CDRpy: Data-Driven Decision Support for the Roll-out of Public Charging Infrastructure in Urban Areas},
  series    = {Transportation Research Board Annual Meeting},
  booktitle = {Transportation Research Board Annual Meeting},
  editor    = {{Transportation Research Board}},
  pages     = {21},
  year      = {2025},
  abstract  = {In this study, the data-driven simulation framework CDRpy is presented, validated, and applied using the example of the city of Munich to support decision-makers in the roll-out of public charging infrastructure in urban areas. The innovation of the study lies in the fact that CDRpy can be used to evaluate different concepts for public charging infrastructure at a detailed level. The demand is mapped based on real charging events for different types of electric vehicles and pricing models, and the evaluations can be broken down to the level of individual charging stations. The developed framework was validated against various criteria using charge detail records from the largest operator of public charging infrastructure in Munich for the years 2020, 2021, and 2022. In a further case study, the additional demand for public charging infrastructure in the city was estimated for the year 2030. The results indicate that mixed and time-based pricing models could reduce the required number of public charging points by around 10\%, from 7,055 to 6,374, compared to energy-based pricing models. Determining the expansion requirements for each sub-district shows that the additional demand for public charging infrastructure varies greatly across the city. Since both technological and economic perspectives are considered, the study is relevant for all decision-makers involved in the planning, operation, and scaling of public charging infrastructure.},
  language  = {en}
}

@inproceedings{AlasmarNeumayerBucker2024,
  author    = {Alasmar, Odai and Neumayer, Martin and Bucker, Dominikus},
  title     = {Data Augmentation Technique for Dealing with Multi-Resolution Issues in Segmentation of Photovoltaic Systems in Aerial Imagery},
  series    = {2024 International Conference on Electrical, Computer and Energy Technologies (ICECET)},
  booktitle = {2024 International Conference on Electrical, Computer and Energy Technologies (ICECET)},
  publisher = {IEEE},
  doi       = {10.1109/ICECET61485.2024.10698167},
  pages     = {1--6},
  year      = {2024},
  abstract  = {Given the urgent global challenges posed by climate change, the transition to renewable energy and the reduction of carbon emissions are of paramount importance. Automatically detecting photovoltaic (PV) systems in aerial imagery is crucial for understanding, planning, and optimizing our energy infrastructure. However, the task is complicated by the significant variability in the ground sampling distance (GSD) of available aerial imagery. This directly affects the spatial resolution and consequently the quality and applicability of training data for deep learning models. Typically, available datasets have high-resolution imagery, making them preferable for annotation due to their detailed visual information. In contrast, real-world applications often have to deal with lower-resolution images. This discrepancy poses a challenge for training models that can make accurate predictions under varying real-world conditions. Our research presents an approach that deliberately degrades the resolution of high-quality training images to match the lower-resolution images encountered in real-world applications. By training on degraded-resolution data, we ensure that the models are not overly tuned to high-resolution features that are often not present in the target application scenarios. The comparative analysis of three state-of-the-art models, with and without our resolution degradation method, shows a considerable improvement in prediction accuracy and model generalisability when the method is applied. This study highlights the importance of matching the resolution of training data to that of real-world applications to develop robust and universally applicable PV detection models.},
  language  = {en}
}
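The augmentation the Alasmar et al. abstract describes, degrading high-resolution training imagery toward the coarser ground sampling distance of the target imagery, can be sketched in a few lines. The version below is an assumption-laden illustration, not the paper's implementation: the GSD values, the bilinear resampling, and the down-then-up resizing (which keeps segmentation masks and model input sizes aligned) are all choices of this sketch.

# Sketch (not the paper's implementation) of resolution degradation for
# training data: downsample a high-resolution tile to a coarser ground
# sampling distance (GSD), then resize back so segmentation masks and the
# model's input size stay aligned. GSD values and bilinear resampling are
# illustrative assumptions.
import numpy as np
from PIL import Image

def degrade_to_gsd(img, native_gsd, target_gsd):
    # Simulate imagery captured at a coarser GSD, e.g. 0.1 m/px -> 0.4 m/px.
    if target_gsd <= native_gsd:
        return img  # already at or below the target resolution
    scale = native_gsd / target_gsd
    small = img.resize((max(1, round(img.width * scale)),
                        max(1, round(img.height * scale))),
                       Image.BILINEAR)  # discard high-frequency detail
    return small.resize(img.size, Image.BILINEAR)

# Synthetic stand-in for a high-resolution aerial tile.
rng = np.random.default_rng(2)
tile = Image.fromarray(rng.integers(0, 255, (512, 512, 3), dtype=np.uint8))
augmented = degrade_to_gsd(tile, native_gsd=0.1, target_gsd=0.4)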
@inproceedings{TetterooFegerFoglietal.2024,
  author    = {Tetteroo, Daniel and Feger, Sebastian and Fogli, Daniela and M{\o}rch, Anders and Piccinno, Antonio and Utterberg Mod{\'e}n, Marie},
  title     = {Participatory Design \& End-User Development: Building Bridges},
  series    = {Adjunct Proceedings of the 2024 Nordic Conference on Human-Computer Interaction},
  booktitle = {Adjunct Proceedings of the 2024 Nordic Conference on Human-Computer Interaction},
  doi       = {10.1145/3677045.3685466},
  year      = {2024},
  abstract  = {Empowering end-users to be actively involved in the design, development, and implementation of systems is a shared goal of the participatory design and end-user development communities. Yet, both communities have developed largely separately, each building upon its own specific set of knowledge, methods, and practices. This workshop aims to identify common goals and a shared research agenda by bringing together researchers from both communities and stimulating the exchange of knowledge and the generation of new ideas.},
  language  = {en}
}

@inproceedings{SchulzePanuysenDaumeetal.2024,
  author    = {Schulze, Achim and Panuysen, Markus and Daume, Darwin and Sch{\"o}nau, Maximilian},
  title     = {Quantitative Shade Detection for PV-Systems based on Clearsky Data},
  series    = {Proceedings of the 41st EU PVSEC},
  booktitle = {Proceedings of the 41st EU PVSEC},
  pages     = {3},
  year      = {2024},
  abstract  = {In modern monitoring applications, losses from shading should be quantitatively separated from other loss mechanisms such as soiling, degradation, or total failures. Based on clearsky reference data, we introduce a shading matrix which enables an accurate hourly description of the shading losses of PV strings over the year. This matrix can be used to improve failure detection and reference yield data in PV systems. Furthermore, a shading matrix with respect to sun position is introduced to improve the spatial understanding of shading caused by surrounding objects.},
  language  = {en}
}
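The shading matrix of the Schulze et al. entry can be pictured as an hours-by-days array of relative losses against a clearsky reference. The sketch below guesses at the general shape of such a computation and is not the paper's definition: the synthetic yields, the production threshold, and the hour/day axes are illustrative assumptions, and the paper's second matrix, indexed by sun position, is not shown.

# Guess at the general shape of a shading matrix (assumptions, not the
# paper's definition): relative loss of measured string yield against a
# clearsky reference, per hour and day. All data below is synthetic.
import numpy as np

rng = np.random.default_rng(3)
hours, days = 24, 365
daylight = np.clip(np.sin(np.linspace(0.0, np.pi, hours)), 0.0, None)
clearsky = daylight[:, None] * rng.uniform(0.9, 1.0, (hours, days))  # reference yield
measured = clearsky * rng.uniform(0.6, 1.0, (hours, days))  # string yield with losses

# Shading matrix: fractional loss vs. clearsky, defined only where the
# reference predicts meaningful production (the 0.05 threshold is assumed).
with np.errstate(divide="ignore", invalid="ignore"):
    shading = np.where(clearsky > 0.05, 1.0 - measured / clearsky, np.nan)

print(f"mean relative shading loss: {np.nanmean(shading):.1%}")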